author    Takashi Iwai <tiwai@suse.de> 2022-04-19 17:26:01 +0200
committer Takashi Iwai <tiwai@suse.de> 2022-04-19 17:26:01 +0200
commit    0aea30a07ec6b50de0fc5f5b2ec34a68ead86b61 (patch)
tree      ee7d7d116570f39e47399c8f691a5a7565077eeb /drivers/net/ethernet/intel/ice/ice_main.c
parent    ALSA: usb-audio: add mapping for MSI MAG X570S Torpedo MAX. (diff)
parent    firmware: cs_dsp: Fix overrun of unterminated control name string (diff)
Merge tag 'asoc-fix-v5.18-rc3' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v5.18

A collection of fixes that came in since the merge window, plus one new
device ID for an x86 laptop. Nothing that really stands out with
particularly big impact outside of the affected device.
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 538
1 file changed, 398 insertions(+), 140 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 30814435f779..b588d7995631 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -21,6 +21,7 @@
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
+#include "ice_vsi_vlan_ops.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -47,6 +48,21 @@ static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
+/**
+ * ice_hw_to_dev - Get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ *
+ * Used to access the device pointer from compilation units which can't easily
+ * include the definition of struct ice_pf without leading to circular header
+ * dependencies.
+ */
+struct device *ice_hw_to_dev(struct ice_hw *hw)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ return &pf->pdev->dev;
+}
+
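A hypothetical caller shows the intended use: a compilation unit that only sees
struct ice_hw can reach the underlying struct device for logging without
pulling in the full struct ice_pf definition. The function below is made up for
illustration (fw_maj_ver/fw_min_ver are existing struct ice_hw members):

	/* illustrative only; not part of this patch */
	static void example_log_fw_ver(struct ice_hw *hw)
	{
		dev_info(ice_hw_to_dev(hw), "firmware %u.%u active\n",
			 hw->fw_maj_ver, hw->fw_min_ver);
	}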
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -244,7 +260,7 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
else
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
@@ -264,7 +280,7 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
else
status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
@@ -279,6 +295,7 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
@@ -352,7 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -366,7 +383,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -396,7 +413,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
err = 0;
- ice_cfg_vlan_pruning(vsi, false);
+ vlan_ops->dis_rx_filtering(vsi);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -409,8 +426,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
- if (vsi->num_vlan > 1)
- ice_cfg_vlan_pruning(vsi, true);
+ if (vsi->current_netdev_flags &
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+ vlan_ops->ena_rx_filtering(vsi);
}
}
}
@@ -502,7 +520,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
@@ -517,8 +536,10 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- ice_for_each_vf(pf, i)
- ice_set_vf_state_qs_dis(&pf->vf[i]);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_set_vf_state_qs_dis(vf);
+ mutex_unlock(&pf->vfs.table_lock);
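The reworked ice_for_each_vf() walks a hashtable keyed by VF ID rather than the
old flat pf->vf array, which is why it now takes a bucket cursor and must run
under pf->vfs.table_lock. A sketch of the shape of such an iterator, assuming
the <linux/hashtable.h> helpers and a struct hlist_node entry member in
struct ice_vf:

	/* sketch; the real macro lives in the ice VF headers */
	#define ice_for_each_vf(pf, bkt, vf) \
		hash_for_each((pf)->vfs.table, (bkt), (vf), entry)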
if (ice_is_eswitch_mode_switchdev(pf)) {
if (reset_type != ICE_RESET_PFR)
@@ -565,6 +586,9 @@ skip:
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_prepare_for_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -610,7 +634,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(ICE_PFR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
}
@@ -661,7 +685,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
return;
@@ -1660,7 +1684,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
u32 reg;
if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1748,47 +1773,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- reg = rd32(hw, VP_MDET_TX_PQM(i));
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
- wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
- wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_RX(i));
+ reg = rd32(hw, VP_MDET_RX(vf->vf_id));
if (reg & VP_MDET_RX_VALID_M) {
- wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
vf->mdd_rx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
- i);
+ vf->vf_id);
/* Since the queue is disabled on VF Rx MDD events, the
* PF can be configured to reset the VF through ethtool
@@ -1799,10 +1823,11 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
- ice_reset_vf(&pf->vf[i], false);
+ ice_reset_vf(vf, ICE_VF_RESET_LOCK);
}
}
}
+ mutex_unlock(&pf->vfs.table_lock);
ice_print_vfs_mdd_events(pf);
}
@@ -2253,6 +2278,43 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ /* report the entire OICR value to AUX driver */
+ swap(event->reg, pf->oicr_err_reg);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
+ if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
+ /* Plug aux device per request */
+ ice_plug_aux_dev(pf);
+
+ /* Mark plugging as done but check whether unplug was
+ * requested during ice_plug_aux_dev() call
+ * (e.g. from ice_clear_rdma_cap()) and if so then
+ * unplug aux device.
+ */
+ if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+ }
+
+ if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
@@ -2428,7 +2490,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf)
err = devm_request_irq(dev, irq_num, vsi->irq_handler,
IRQF_SHARED, q_vector->name,
q_vector);
@@ -2495,10 +2557,10 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
+ xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
+ xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
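ICE_RING_QUARTER replaces the fixed ICE_TX_THRESH so that the next_dd/next_rs
clean-up thresholds scale with the configured descriptor count; presumably
something along the lines of:

	/* assumed definition: one quarter of the ring's descriptor count */
	#define ICE_RING_QUARTER(R) ((R)->count >> 2)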
@@ -3015,17 +3077,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if (oicr & ICE_AUX_CRIT_ERR) {
- struct iidc_event *event;
-
+ pf->oicr_err_reg |= oicr;
+ set_bit(ICE_AUX_ERR_PENDING, pf->state);
ena_mask &= ~ICE_AUX_CRIT_ERR;
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
- /* report the entire OICR value to AUX driver */
- event->reg = oicr;
- ice_send_event_to_aux(pf, event);
- kfree(event);
- }
}
/* Report any remaining unexpected interrupts */
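This hunk is the other half of the ICE_AUX_ERR_PENDING change in
ice_service_task() above: the hard-IRQ handler may not sleep, so the GFP_KERNEL
allocation moves out of it; the handler only accumulates the cause register and
sets a state bit, and the service task allocates and reports later. A minimal
sketch of the pattern, with hypothetical names:

	#include <linux/bitops.h>
	#include <linux/kernel.h>	/* swap() */
	#include <linux/printk.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct example_event { u32 reg; };

	struct example_pf {
		unsigned long state;	/* bit 0 == EXAMPLE_ERR_PENDING */
		u32 err_reg;		/* accumulated cause bits */
	};
	#define EXAMPLE_ERR_PENDING 0

	/* hard-IRQ context: record only, never allocate or sleep */
	static void example_hard_irq(struct example_pf *pf, u32 cause)
	{
		pf->err_reg |= cause;
		set_bit(EXAMPLE_ERR_PENDING, &pf->state);
	}

	/* process context (service task): sleeping allocation is fine */
	static void example_service_task(struct example_pf *pf)
	{
		struct example_event *ev;

		if (!test_and_clear_bit(EXAMPLE_ERR_PENDING, &pf->state))
			return;
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			return;
		swap(ev->reg, pf->err_reg);	/* read-and-reset handoff */
		pr_info("reporting cause 0x%08x to aux driver\n", ev->reg);
		kfree(ev);
	}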
@@ -3230,6 +3284,7 @@ static void ice_set_ops(struct net_device *netdev)
static void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
@@ -3256,6 +3311,10 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
+ if (is_dvm_ena)
+ vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
+
tso_features = NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
@@ -3287,6 +3346,15 @@ static void ice_set_netdev_features(struct net_device *netdev)
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
+
+ /* advertise support but don't enable by default since only one type of
+ * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
+ * type is turned on, the other has to be turned off. This is enforced by the
+ * ice_fix_features() ndo callback.
+ */
+ if (is_dvm_ena)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX;
}
/**
@@ -3361,14 +3429,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
}
/**
@@ -3382,7 +3450,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
}
/**
@@ -3396,40 +3464,37 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
}
/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be added
*
* net_device_ops implementation for adding VLAN IDs
*/
static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* VLAN 0 is added by default during load/reset */
if (!vid)
return 0;
- /* Enable VLAN pruning when a VLAN other than 0 is added */
- if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
- if (ret)
- return ret;
- }
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->add_vlan(vsi, &vlan);
if (!ret)
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
@@ -3439,36 +3504,36 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/**
* ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be removed
*
* net_device_ops implementation for removing VLAN IDs
*/
static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* don't allow removal of VLAN 0 */
if (!vid)
return 0;
- /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ /* Make sure VLAN delete is successful before updating VLAN
* information
*/
- ret = ice_vsi_kill_vlan(vsi, vid);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->del_vlan(vsi, &vlan);
if (ret)
return ret;
- /* Disable pruning when VLAN 0 is the only VLAN rule */
- if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
- ret = ice_cfg_vlan_pruning(vsi, false);
-
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
- return ret;
+ return 0;
}
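Both ndo handlers above now dispatch through a per-VSI ops table instead of
calling mode-specific helpers directly, so the same code path serves Single and
Double VLAN Mode. The member list below is inferred from the vlan_ops-> calls
in this patch; the real definition lives in ice_vsi_vlan_ops.h:

	/* rough shape, inferred from usage in this diff; illustrative only */
	struct ice_vsi_vlan_ops {
		int (*add_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
		int (*del_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
		int (*ena_stripping)(struct ice_vsi *vsi, u16 tpid);
		int (*dis_stripping)(struct ice_vsi *vsi);
		int (*ena_insertion)(struct ice_vsi *vsi, u16 tpid);
		int (*dis_insertion)(struct ice_vsi *vsi);
		int (*ena_rx_filtering)(struct ice_vsi *vsi);
		int (*dis_rx_filtering)(struct ice_vsi *vsi);
	};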
/**
@@ -3537,12 +3602,17 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
static int ice_setup_pf_sw(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
+ bool dvm = ice_is_dvm_ena(&pf->hw);
struct ice_vsi *vsi;
int status;
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
+ status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (status)
+ return -EIO;
+
vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
if (!vsi)
return -ENOMEM;
@@ -3653,6 +3723,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
+ mutex_destroy(&pf->vfs.table_lock);
if (pf->avail_txqs) {
bitmap_free(pf->avail_txqs);
@@ -3677,19 +3748,16 @@ static void ice_set_pf_caps(struct ice_pf *pf)
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
- if (func_caps->common_cap.rdma) {
+ if (func_caps->common_cap.rdma)
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
- pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
- ICE_MAX_VF_COUNT);
+ pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
+ ICE_MAX_SRIOV_VFS);
}
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
@@ -3755,6 +3823,9 @@ static int ice_init_pf(struct ice_pf *pf)
return -ENOMEM;
}
+ mutex_init(&pf->vfs.table_lock);
+ hash_init(pf->vfs.table);
+
return 0;
}
@@ -3809,7 +3880,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
/* reserve vectors for RDMA auxiliary driver */
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
@@ -3850,7 +3921,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
int v_remain = v_actual - v_other;
int v_rdma = 0, v_min_rdma = 0;
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
/* Need at least 1 interrupt in addition to
* AEQ MSIX
*/
@@ -3884,7 +3955,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (ice_is_rdma_ena(pf))
dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
}
@@ -4064,8 +4135,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
/* allow all VLANs on Tx and don't strip on Rx */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
- ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -4074,7 +4145,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
}
kfree(ctxt);
@@ -4459,8 +4530,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err)
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
@@ -4683,6 +4752,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_init(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
dev_err(dev, "could not initialize flow director\n");
@@ -4712,7 +4784,7 @@ probe_done:
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_aux_ena(pf)) {
+ if (ice_is_rdma_ena(pf)) {
pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
@@ -4854,14 +4926,16 @@ static void ice_remove(struct pci_dev *pdev)
ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
- mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
ice_vsi_release_all(pf);
+ mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -5570,6 +5644,194 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
return err;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+/**
+ * ice_fix_features - fix the netdev features flags based on device limitations
+ * @netdev: ptr to the netdev that flags are being fixed on
+ * @features: features that need to be checked and possibly fixed
+ *
+ * Make sure any fixups are made to features in this callback. This keeps the
+ * driver from having to check for unsupported configurations throughout the
+ * rest of the code, because that is the responsibility of this callback.
+ *
+ * Single VLAN Mode (SVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * Double VLAN Mode (DVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * NETIF_F_HW_VLAN_STAG_FILTER
+ * NETIF_F_HW_VLAN_STAG_RX
+ * NETIF_F_HW_VLAN_STAG_TX
+ *
+ * Features that need fixing:
+ * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
+ * These are mutually exclusive as the VSI context cannot support multiple
+ * VLAN ethertypes simultaneously for stripping and/or insertion. If this
+ * is not done, then default to clearing the requested STAG offload
+ * settings.
+ *
+ * All supported filtering has to be enabled or disabled together. For
+ * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
+ * together. If this is not done, then default to VLAN filtering disabled.
+ * These are mutually exclusive as there is currently no way to
+ * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
+ * prune rules.
+ */
+static netdev_features_t
+ice_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ netdev_features_t supported_vlan_filtering;
+ netdev_features_t requested_vlan_filtering;
+ struct ice_vsi *vsi = np->vsi;
+
+ requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES;
+
+ /* make sure supported_vlan_filtering works for both SVM and DVM */
+ supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if (requested_vlan_filtering &&
+ requested_vlan_filtering != supported_vlan_filtering) {
+ if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n");
+ features |= supported_vlan_filtering;
+ } else {
+ netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n");
+ features &= ~supported_vlan_filtering;
+ }
+ }
+
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
+ netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ return features;
+}
+
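To make the resolution rules concrete, a few worked requests (illustrative;
derived from the branches above, assuming DVM):

	/*
	 * requested: CTAG_RX | CTAG_TX | STAG_RX
	 *   CTAG and STAG offloads are mutually exclusive, so STAG loses:
	 * resolved:  CTAG_RX | CTAG_TX
	 *
	 * requested: CTAG_FILTER only
	 *   filtering toggles as a set and CTAG_FILTER was asked for:
	 * resolved:  CTAG_FILTER | STAG_FILTER
	 *
	 * requested: STAG_FILTER only (no CTAG_FILTER)
	 *   partial request without CTAG_FILTER, so filtering is cleared:
	 * resolved:  no filtering bits set
	 */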
+/**
+ * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN offload settings
+ *
+ * First, determine the vlan_ethertype based on the VLAN offload bits in
+ * features. Then determine if stripping and insertion should be enabled or
+ * disabled. Finally enable or disable VLAN stripping and insertion.
+ */
+static int
+ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int strip_err = 0, insert_err = 0;
+ u16 vlan_ethertype = 0;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (enable_stripping)
+ strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
+ else
+ strip_err = vlan_ops->dis_stripping(vsi);
+
+ if (enable_insertion)
+ insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
+ else
+ insert_err = vlan_ops->dis_insertion(vsi);
+
+ if (strip_err || insert_err)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN filtering settings
+ *
+ * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
+ * features.
+ */
+static int
+ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ int err = 0;
+
+ /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
+ * if either bit is set
+ */
+ if (features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ err = vlan_ops->ena_rx_filtering(vsi);
+ else
+ err = vlan_ops->dis_rx_filtering(vsi);
+
+ return err;
+}
+
+/**
+ * ice_set_vlan_features - set VLAN settings based on suggested feature set
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ *
+ * Only update VLAN settings if the requested_vlan_features are different from
+ * the current_vlan_features.
+ */
+static int
+ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t current_vlan_features, requested_vlan_features;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int err;
+
+ current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_offload_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ current_vlan_features = netdev->features &
+ NETIF_VLAN_FILTERING_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_filtering_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -5604,26 +5866,9 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
netdev->features & NETIF_F_RXHASH)
ice_vsi_manage_rss_lut(vsi, false);
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, false);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, false);
+ ret = ice_set_vlan_features(netdev, features);
+ if (ret)
+ return ret;
if ((features & NETIF_F_NTUPLE) &&
!(netdev->features & NETIF_F_NTUPLE)) {
@@ -5647,23 +5892,26 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
else
clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- return ret;
+ return 0;
}
/**
- * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
* @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
- int ret = 0;
+ int err;
+
+ err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
- ret = ice_vsi_manage_vlan_insertion(vsi);
+ err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
- return ret;
+ return ice_vsi_add_vlan_zero(vsi);
}
/**
@@ -5904,9 +6152,9 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that need to be performed to read u64 values on a 32-bit machine.
*/
-static void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
- u64 *pkts, u64 *bytes)
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes)
{
unsigned int start;
@@ -5936,8 +6184,9 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- if (ring)
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ if (!ring)
+ continue;
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -6264,11 +6513,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0;
+ int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ vlan_err = ice_vsi_del_vlan_zero(vsi);
if (!ice_is_e810(&vsi->back->hw))
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
@@ -6310,7 +6560,7 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err) {
+ if (tx_err || rx_err || link_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6620,6 +6870,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ bool dvm;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6683,6 +6934,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
+ dvm = ice_is_dvm_ena(hw);
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_init_ctrlq;
+
err = ice_sched_init_port(hw->port_info);
if (err)
goto err_sched_init_port;
@@ -6719,6 +6976,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
@@ -6817,7 +7077,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct iidc_event *event;
u8 count = 0;
int err = 0;
@@ -6852,14 +7111,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
-
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
@@ -6867,21 +7118,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
- goto event_after;
+ return err;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
- goto event_after;
+ return err;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
-event_after:
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
return err;
}
@@ -8525,6 +8773,7 @@ ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
+ bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -8537,24 +8786,32 @@ ice_features_check(struct sk_buff *skb,
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
- len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
- goto out_rm_features;
+ /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+ * the case of IPIP frames, the transport header pointer is
+ * after the inner header! So check to make sure that this
+ * is a GRE or UDP_TUNNEL frame before doing that math.
+ */
+ if (gso && (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+ len = skb_inner_network_header(skb) -
+ skb_transport_header(skb);
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+ goto out_rm_features;
+ }
- len = skb_inner_transport_header(skb) -
- skb_inner_network_header(skb);
+ len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
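The gso_type guard above is easiest to see with the two header layouts the
comment mentions (illustrative):

	/*
	 * VXLAN (UDP_TUNNEL): [eth][outer ip][udp][vxlan][inner eth][inner ip]
	 *   transport header -> the outer UDP header, and the inner network
	 *   header sits after it, so the tunnel-header subtraction is valid.
	 *
	 * IPIP/SIT:           [eth][outer ip][inner ip]
	 *   the transport header pointer ends up past the inner IP header,
	 *   so inner_network_header - transport_header would be meaningless;
	 *   hence the GRE/UDP_TUNNEL check before doing that math.
	 */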
@@ -8582,6 +8839,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_start_xmit = ice_start_xmit,
.ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
+ .ndo_fix_features = ice_fix_features,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
.ndo_validate_addr = eth_validate_addr,