From 8343268ec3cf4e097aa8b2071f0cd6779e2c4953 Mon Sep 17 00:00:00 2001
From: Vlad Buslov
Date: Wed, 18 Aug 2021 14:19:13 +0300
Subject: net/mlx5: Bridge, fix uninitialized variable usage

In some conditions the variable 'err' is not assigned a value in the
mlx5_esw_bridge_port_obj_attr_set() and mlx5_esw_bridge_port_changeupper()
functions after the recent changes to support LAG.

Initialize the variable to zero in both cases.

Reported-by: Colin King
Reported-by: Tim Gardner
Reported-by: Naresh Kamboju
CC: linux-kernel@vger.kernel.org
Fixes: ff9b7521468b ("net/mlx5: Bridge, support LAG")
Signed-off-by: Vlad Buslov
---
 drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0c38c2e319be..b5ddaa82755f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
 	u16 vport_num, esw_owner_vhca_id;
 	struct netlink_ext_ack *extack;
 	int ifindex = upper->ifindex;
-	int err;
+	int err = 0;
 
 	if (!netif_is_bridge_master(upper))
 		return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
 	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
 	const struct switchdev_attr *attr = port_attr_info->attr;
 	u16 vport_num, esw_owner_vhca_id;
-	int err;
+	int err = 0;
 
 	if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw,
 							     &vport_num, &esw_owner_vhca_id))
--
cgit v1.2.3-59-g8ed1b


From 897ae4b40e80be7dcbf2b3079d85fa6339a6b751 Mon Sep 17 00:00:00 2001
From: Parav Pandit
Date: Tue, 17 Aug 2021 15:59:17 +0300
Subject: net/mlx5: Fix rdma aux device on devlink reload

RDMA auxdev parameter registration was skipped for the eswitch manager
PCI PF. Because of this, when the devlink parameter is read in the code
flow below, it reads as false.

$ devlink dev reload pci/0000:06:00.0

devlink_reload()
  mlx5_load_one()
    mlx5_attach_device()
      is_ib_enabled()
        devlink_param_driverinit_value_get()

As a result, is_ib_enabled() returns false for the RDMA auxiliary
device, and creation of the RDMA auxiliary device is skipped on reload.

There is no need to check the eswitch manager capability to support the
RDMA auxiliary device. Hence, fix it by dropping the eswitch manager
capability check.
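As an illustration of the changed gating logic only, a minimal sketch with
simplified names (not the actual mlx5 code):

    /* Old: the enable_rdma parameter was only registered when the PF was
     * not an eswitch manager, so a driverinit read on reload saw "false".
     */
    static bool register_rdma_param_old(bool rdma_built_in, bool esw_manager)
    {
    	return rdma_built_in && !esw_manager;
    }

    /* New: register whenever RDMA support is compiled in. */
    static bool register_rdma_param_new(bool rdma_built_in)
    {
    	return rdma_built_in;
    }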
Fixes: 87158cedf00e ("net/mlx5: Support enable_rdma devlink dev param") Signed-off-by: Parav Pandit Reviewed-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index e84287ffc7ce..dcf9f27ba2ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param = static int mlx5_devlink_rdma_param_register(struct devlink *devlink) { - struct mlx5_core_dev *dev = devlink_priv(devlink); union devlink_param_value value; int err; - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return 0; err = devlink_param_register(devlink, &enable_rdma_param); @@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink) static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) { - struct mlx5_core_dev *dev = devlink_priv(devlink); - - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return; devlink_param_unpublish(devlink, &enable_rdma_param); -- cgit v1.2.3-59-g8ed1b From da8252d5805d4a80120a0c2151277e5fb9e8aa9e Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Wed, 11 Aug 2021 16:40:16 +0300 Subject: net/mlx5: Lag, don't update lag if lag isn't supported In NICs that don't support LAG, the LAG control structure won't be allocated. If it wasn't allocated it means LAG doesn't exists and can be skipped. Fixes: cac1eb2cf2e3 ("net/mlx5: Lag, properly lock eswitch if needed") Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 49ca57c6d31d..ca5690b0a7ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) struct mlx5_core_dev *dev1; struct mlx5_lag *ldev; + ldev = mlx5_lag_dev(dev); + if (!ldev) + return; + mlx5_dev_list_lock(); - ldev = mlx5_lag_dev(dev); dev0 = ldev->pf[MLX5_LAG_P1].dev; dev1 = ldev->pf[MLX5_LAG_P2].dev; @@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; - mlx5_dev_list_lock(); ldev = mlx5_lag_dev(dev); + if (!ldev) + return; + + mlx5_dev_list_lock(); ldev->mode_changes_in_progress--; mlx5_dev_list_unlock(); mlx5_queue_bond_work(ldev, 0); -- cgit v1.2.3-59-g8ed1b From dfe6fd72b5f1878b16aa2c8603e031bbcd66b96d Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 18 Aug 2021 13:09:26 -0700 Subject: net/mlx5: FWTrace, cancel work on alloc pd error flow Handle error flow on mlx5_core_alloc_pd() failure, read_fw_strings_work must be canceled. 
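As a generic sketch of the error-unwind pattern this fix applies (the
example_* names are hypothetical; INIT_WORK(), schedule_work() and
cancel_work_sync() are the regular workqueue APIs):

    #include <linux/workqueue.h>

    struct example {
    	struct work_struct work;
    };

    static void example_work_fn(struct work_struct *work) { }
    static int example_alloc_pd(struct example *ex) { return 0; }     /* hypothetical */
    static void example_dealloc_pd(struct example *ex) { }            /* hypothetical */
    static int example_create_mkey(struct example *ex) { return 0; }  /* hypothetical */

    static int example_init(struct example *ex)
    {
    	int err;

    	INIT_WORK(&ex->work, example_work_fn);
    	schedule_work(&ex->work);

    	err = example_alloc_pd(ex);
    	if (err)
    		goto err_cancel_work;	/* already-queued work must be cancelled */

    	err = example_create_mkey(ex);
    	if (err)
    		goto err_dealloc_pd;

    	return 0;

    err_dealloc_pd:
    	example_dealloc_pd(ex);
    err_cancel_work:
    	cancel_work_sync(&ex->work);
    	return err;
    }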
Fixes: c71ad41ccb0c ("net/mlx5: FW tracer, events handling") Reported-by: Pavel Machek (CIP) Suggested-by: Pavel Machek (CIP) Signed-off-by: Saeed Mahameed Reviewed-by: Aya Levin --- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 3f8a98093f8c..f9cf9fb31547 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn); if (err) { mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err); - return err; + goto err_cancel_work; } err = mlx5_fw_tracer_create_mkey(tracer); @@ -1031,6 +1031,7 @@ err_notifier_unregister: mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); +err_cancel_work: cancel_work_sync(&tracer->read_fw_strings_work); return err; } -- cgit v1.2.3-59-g8ed1b From ee27e330a953595903979ffdb84926843595a9fe Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 1 Sep 2021 11:48:13 +0300 Subject: net/mlx5: Fix potential sleeping in atomic context Fixes the below flow of sleeping in atomic context by releasing the RCU lock before calling to free_match_list. build_match_list() <- disables preempt -> free_match_list() -> tree_put_node() -> down_write_ref_node() <- take write lock Fixes: 693c6883bbc4 ("net/mlx5: Add hash table for flow groups in flow table") Reported-by: Dan Carpenter Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9fe8e3c204d6..fe501ba88bea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head, curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { + rcu_read_unlock(); free_match_list(match_head, ft_locked); - err = -ENOMEM; - goto out; + return -ENOMEM; } curr_match->g = g; list_add_tail(&curr_match->list, &match_head->list); } -out: rcu_read_unlock(); return err; } -- cgit v1.2.3-59-g8ed1b From c91c1da72b47fc4c5e353cdd9099ba94ae07d2fa Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 15 Jul 2021 10:53:28 +0300 Subject: net/mlx5e: Fix mutual exclusion between CQE compression and HW TS Some profiles of the driver don't support a dedicated PTP-RQ, hence can't support HW TS and CQE compression simultaneously. When HW TS is enabled the COE compression is disabled, and should be restored when the HW TS is turned off. Add rx_filter as an input to modifying CQE compression to enforce this restriction. 
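To restate the rule being enforced, a simplified sketch (not the driver's
actual function signature):

    /* Enabling CQE compression must be refused when the profile has no
     * dedicated PTP RQ and an RX timestamping filter is currently active.
     */
    static bool cqe_compress_allowed(bool enable, bool rx_ptp_support,
    				 bool rx_filter)
    {
    	if (enable && !rx_ptp_support && rx_filter)
    		return false;
    	return true;
    }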
Fixes: 256f79d13c1d ("net/mlx5e: Fix HW TS with CQE compression according to profile") Signed-off-by: Aya Levin Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11 ++++++----- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 669a75f3537a..7b8c8187543a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work); int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); -int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2cfd12953909..306fb5d6a36d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) return set_pflag_cqe_based_moder(netdev, enable, true); } -int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val) +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter) { bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); struct mlx5e_params new_params; @@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val if (curr_val == new_val) return 0; - if (new_val && !priv->profile->rx_ptp_support && - priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) { + if (new_val && !priv->profile->rx_ptp_support && rx_filter) { netdev_err(priv->netdev, "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n"); return -EINVAL; @@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val new_params = priv->channels.params; MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); - if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) + if (rx_filter) new_params.ptp_rx = new_val; if (new_params.ptp_rx == priv->channels.params.ptp_rx) @@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + bool rx_filter; int err; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; - err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); + rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE; + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47efd858964d..3fd515e7bf30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte 
if (!rx_filter) /* Reset CQE compression to Admin default */ - return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def); + return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false); if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) return 0; /* Disable CQE compression */ netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); - err = mlx5e_modify_rx_cqe_compression_locked(priv, false); + err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true); if (err) netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); -- cgit v1.2.3-59-g8ed1b From 8db6a54f3cae6a803b2cbf5390662bca641f7da8 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Sun, 29 Aug 2021 13:41:08 +0300 Subject: net/mlx5e: Fix condition when retrieving PTP-rqn When activating the PTP-RQ, redirect the RQT from drop-RQ to PTP-RQ. Use mlx5e_channels_get_ptp_rqn to retrieve the rqn. This helper returns a boolean (not status), hence caller should consider return value 0 as a fail. Change the caller interpretation of the return value. Fixes: 43ec0f41fa73 ("net/mlx5e: Hide all implementation details of mlx5e_rx_res") Signed-off-by: Aya Levin Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index bf0313e2682b..13056cb9757d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann if (res->features & MLX5E_RX_RES_FEATURE_PTP) { u32 rqn; - if (mlx5e_channels_get_ptp_rqn(chs, &rqn)) + if (!mlx5e_channels_get_ptp_rqn(chs, &rqn)) rqn = res->drop_rqn; err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn); -- cgit v1.2.3-59-g8ed1b From d437f5aa23aa2b7bd07cd44b839d7546cc17166f Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 7 Sep 2021 22:07:03 -0700 Subject: ibmvnic: check failover_pending in login response If a failover occurs before a login response is received, the login response buffer maybe undefined. Check that there was no failover before accessing the login response buffer. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a775c69e4fd7..6aa6ff89a765 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, return 0; } + if (adapter->failover_pending) { + adapter->init_done_rc = -EAGAIN; + netdev_dbg(netdev, "Failover pending, ignoring login response\n"); + complete(&adapter->init_done); + /* login response buffer will be released on reset */ + return 0; + } + netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); -- cgit v1.2.3-59-g8ed1b From d7e203ffd3ba5965e88952e7364a42ab32064408 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 7 Sep 2021 15:46:10 +0200 Subject: ne2000: fix unused function warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Geert noticed a warning on MIPS TX49xx, Atari and presuambly other platforms when the driver is built-in but NETDEV_LEGACY_INIT is disabled: drivers/net/ethernet/8390/ne.c:909:20: warning: ‘ne_add_devices’ defined but not used [-Wunused-function] Merge the two module init functions into a single one with an IS_ENABLED() check to replace the incorrect #ifdef. Fixes: 4228c3942821 ("make legacy ISA probe optional") Reported-by: Geert Uytterhoeven Signed-off-by: Arnd Bergmann Reviewed-by: Geert Uytterhoeven Tested-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/ne.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 53660bc8d6ff..9afc712f5948 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -922,13 +922,16 @@ static void __init ne_add_devices(void) } } -#ifdef MODULE static int __init ne_init(void) { int retval; - ne_add_devices(); + + if (IS_MODULE(CONFIG_NE2000)) + ne_add_devices(); + retval = platform_driver_probe(&ne_driver, ne_drv_probe); - if (retval) { + + if (IS_MODULE(CONFIG_NE2000) && retval) { if (io[0] == 0) pr_notice("ne.c: You must supply \"io=0xNNN\"" " value(s) for ISA cards.\n"); @@ -941,18 +944,8 @@ static int __init ne_init(void) return retval; } module_init(ne_init); -#else /* MODULE */ -static int __init ne_init(void) -{ - int retval = platform_driver_probe(&ne_driver, ne_drv_probe); - - /* Unregister unused platform_devices. */ - ne_loop_rm_unreg(0); - return retval; -} -module_init(ne_init); -#ifdef CONFIG_NETDEV_LEGACY_INIT +#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT) struct net_device * __init ne_probe(int unit) { int this_dev; @@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit) return ERR_PTR(-ENODEV); } #endif -#endif /* MODULE */ static void __exit ne_exit(void) { -- cgit v1.2.3-59-g8ed1b From ea269a6f720782ed94171fb962b14ce07c372138 Mon Sep 17 00:00:00 2001 From: Nathan Rossi Date: Thu, 2 Sep 2021 05:14:49 +0000 Subject: net: phylink: Update SFP selected interface on advertising changes Currently changes to the advertising state via ethtool do not cause any reselection of the configured interface mode after the SFP is already inserted and initially configured. While it is not typical to change the advertised link modes for an interface using an SFP in certain use cases it is desirable. 
In the case of a SFP port that is capable of handling both SFP and SFP+ modules it will automatically select between 1G and 10G modes depending on the supported mode of the SFP. However if the SFP module is capable of working in multiple modes (e.g. a SFP+ DAC that can operate at 1G or 10G), one end of the cable may be attached to a SFP 1000base-x port thus the SFP+ end must be manually configured to the 1000base-x mode in order for the link to be established. This change causes the ethtool setting of advertised mode changes to reselect the interface mode so that the link can be established. Additionally when a module is inserted the advertising mode is reset to match the supported modes of the module. Signed-off-by: Nathan Rossi Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index a1464b764d4d..0a0abe8e4be0 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) return -EINVAL; + /* If this link is with an SFP, ensure that changes to advertised modes + * also cause the associated interface to be selected such that the + * link can be configured correctly. + */ + if (pl->sfp_port && pl->sfp_bus) { + config.interface = sfp_select_interface(pl->sfp_bus, + config.advertising); + if (config.interface == PHY_INTERFACE_MODE_NA) { + phylink_err(pl, + "selection of interface failed, advertisement %*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, + config.advertising); + return -EINVAL; + } + + /* Revalidate with the selected interface */ + linkmode_copy(support, pl->supported); + if (phylink_validate(pl, support, &config)) { + phylink_err(pl, "validation of %s/%s with support %*pb failed\n", + phylink_an_mode_str(pl->cur_link_an_mode), + phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + return -EINVAL; + } + } + mutex_lock(&pl->state_mutex); pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; @@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode, if (phy_interface_mode_is_8023z(iface) && pl->phydev) return -EINVAL; - changed = !linkmode_equal(pl->supported, support); + changed = !linkmode_equal(pl->supported, support) || + !linkmode_equal(pl->link_config.advertising, + config.advertising); if (changed) { linkmode_copy(pl->supported, support); linkmode_copy(pl->link_config.advertising, config.advertising); -- cgit v1.2.3-59-g8ed1b From b5c102238cea985d8126b173d06b9e1de88037ee Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 7 Sep 2021 12:05:54 -0500 Subject: net: ipa: initialize all filter table slots There is an off-by-one problem in ipa_table_init_add(), when initializing filter tables. In that function, the number of filter table entries is determined based on the number of set bits in the filter map. However that count does *not* include the extra "slot" in the filter table that holds the filter map itself. Meanwhile, ipa_table_addr() *does* include the filter map in the memory it returns, but because the count it's provided doesn't include it, it includes one too few table entries. Fix this by including the extra slot for the filter map in the count computed in ipa_table_init_add(). 
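To make the off-by-one concrete, a minimal sketch of the corrected count
(assuming only that hweight32() counts the set bits):

    #include <linux/types.h>
    #include <linux/bitops.h>

    /* Slot 0 holds the filter bitmap itself; each set bit in the map then
     * contributes one rule slot.
     */
    static u32 filter_table_entry_count(u32 filter_map)
    {
    	return 1 + hweight32(filter_map);	/* previously just hweight32() */
    }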
Note: ipa_filter_reset_table() does not have this problem; it resets
filter table entries one by one, but does not overwrite the filter
bitmap.

Fixes: 2b9feef2b6c2 ("soc: qcom: ipa: filter and routing tables")
Signed-off-by: Alex Elder
Signed-off-by: David S. Miller
---
 drivers/net/ipa/ipa_table.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2324e1b93e37..1da334f54944 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
 	 * table region determines the number of entries it has.
 	 */
 	if (filter) {
-		count = hweight32(ipa->filter_map);
+		/* Include one extra "slot" to hold the filter map itself */
+		count = 1 + hweight32(ipa->filter_map);
 		hash_count = hash_mem->size ? count : 0;
 	} else {
 		count = mem->size / sizeof(__le64);
--
cgit v1.2.3-59-g8ed1b


From 276aae377206d60b9b7b7df4586cd9f2a813f5d0 Mon Sep 17 00:00:00 2001
From: Joakim Zhang
Date: Wed, 8 Sep 2021 15:43:35 +0800
Subject: net: stmmac: fix system hang caused by eee_ctrl_timer during suspend/resume

Commit 5f58591323bf ("net: stmmac: delete the eee_ctrl_timer after napi
disabled") tried to fix the system hang caused by eee_ctrl_timer, but
unfortunately it only resolves it for the system reboot stress test.
The system hang can also be reproduced easily during a system
suspend/resume stress test when NFS is mounted on an i.MX8MP EVK board.

In the stmmac driver, the EEE feature is tied into the phylink
framework. On system suspend, phylink_stop() queues delayed work that
invokes stmmac_mac_link_down(), which deactivates eee_ctrl_timer
synchronously. The above commit tries to fix the issue by deactivating
eee_ctrl_timer explicitly, but that is not enough: looking at the
eee_ctrl_timer expiry callback stmmac_eee_ctrl_timer(), it can enable
hardware EEE mode again.

What is unexpected is that the LPI interrupt (MAC_Interrupt_Enable.LPIEN
bit) is always asserted. This interrupt can be raised on LPI state
entry/exit in the MAC, and at that point the clock could already have
been disabled. The result is a system hang when the driver tries to
touch a register from the interrupt handler.

The reason the above commit can fix the system hang in stmmac_release()
is that it deactivates eee_ctrl_timer not just after NAPI is disabled,
but also after the IRQ is freed.

In conclusion, the hardware can generate an LPI interrupt after the
clock has been disabled during suspend or resume, because the hardware
is in EEE mode with the LPI interrupt enabled. Interrupts at the MAC,
MTL and DMA levels are enabled and never disabled on system suspend, so
it is safer to postpone clock management from the suspend stage to the
noirq suspend stage.

Fixes: 5f58591323bf ("net: stmmac: delete the eee_ctrl_timer after napi disabled")
Signed-off-by: Joakim Zhang
Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14 ------- .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 44 ++++++++++++++++++++++ 2 files changed, 44 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ece02b35a6ce..246f84fedbc8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); u32 chan; - int ret; if (!ndev || !netif_running(ndev)) return 0; @@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev) } else { stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); - /* Disable clock in case of PWM is off */ - clk_disable_unprepare(priv->plat->clk_ptp_ref); - ret = pm_runtime_force_suspend(dev); - if (ret) { - mutex_unlock(&priv->lock); - return ret; - } } mutex_unlock(&priv->lock); @@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev) priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); - /* enable the clk previously disabled */ - ret = pm_runtime_force_resume(dev); - if (ret) - return ret; - if (priv->plat->clk_ptp_ref) - clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5ca710844cc1..4885f9ad1b1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -9,6 +9,7 @@ *******************************************************************************/ #include +#include #include #include #include @@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev) return stmmac_bus_clks_config(priv, true); } +static int stmmac_pltfr_noirq_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { + /* Disable clock in case of PWM is off */ + clk_disable_unprepare(priv->plat->clk_ptp_ref); + + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; + } + + return 0; +} + +static int stmmac_pltfr_noirq_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { + /* enable the clk previously disabled */ + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; + + clk_prepare_enable(priv->plat->clk_ptp_ref); + } + + return 0; +} + const struct dev_pm_ops stmmac_pltfr_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume) SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume) }; EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); -- cgit v1.2.3-59-g8ed1b From 3c4cea8fa7f71f00c5279547043a84bc2a4d8b8c Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 8 Sep 2021 13:42:09 +0200 Subject: vhost_net: fix OoB on sendmsg() failure. 
If the sendmsg() call in vhost_tx_batch() fails, both the 'batched_xdp' and 'done_idx' indexes are left unchanged. If such failure happens when batched_xdp == VHOST_NET_BATCH, the next call to vhost_net_build_xdp() will access and write memory outside the xdp buffers area. Since sendmsg() can only error with EBADFD, this change addresses the issue explicitly freeing the XDP buffers batch on error. Fixes: 0a0be13b8fe2 ("vhost_net: batch submitting XDP buffers to underlayer sockets") Suggested-by: Jason Wang Signed-off-by: Paolo Abeni Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 3a249ee7e144..28ef323882fb 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net, .num = nvq->batched_xdp, .ptr = nvq->xdp, }; - int err; + int i, err; if (nvq->batched_xdp == 0) goto signal_used; @@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net, err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); + + /* free pages owned by XDP; since this is an unlikely error path, + * keep it simple and avoid more complex bulk update for the + * used pages + */ + for (i = 0; i < nvq->batched_xdp; ++i) + put_page(virt_to_head_page(nvq->xdp[i].data)); + nvq->batched_xdp = 0; + nvq->done_idx = 0; return; } -- cgit v1.2.3-59-g8ed1b From 273c29e944bda9a20a30c26cfc34c9a3f363280b Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 8 Sep 2021 09:58:20 -0700 Subject: ibmvnic: check failover_pending in login response If a failover occurs before a login response is received, the login response buffer maybe undefined. Check that there was no failover before accessing the login response buffer. Fixes: 032c5e82847a ("Driver for IBM System i/p VNIC protocol") Signed-off-by: Sukadev Bhattiprolu Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6aa6ff89a765..a4579b340120 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4708,6 +4708,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, return 0; } + if (adapter->failover_pending) { + adapter->init_done_rc = -EAGAIN; + netdev_dbg(netdev, "Failover pending, ignoring login response\n"); + complete(&adapter->init_done); + /* login response buffer will be released on reset */ + return 0; + } + netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); -- cgit v1.2.3-59-g8ed1b From d82d5303c4c539db86588ffb5dc5b26c3f1513e8 Mon Sep 17 00:00:00 2001 From: Tong Zhang Date: Wed, 8 Sep 2021 12:02:32 -0700 Subject: net: macb: fix use after free on rmmod plat_dev->dev->platform_data is released by platform_device_unregister(), use of pclk and hclk is a use-after-free. Since device unregister won't need a clk device we adjust the function call sequence to fix this issue. [ 31.261225] BUG: KASAN: use-after-free in macb_remove+0x77/0xc6 [macb_pci] [ 31.275563] Freed by task 306: [ 30.276782] platform_device_release+0x25/0x80 Suggested-by: Nicolas Ferre Signed-off-by: Tong Zhang Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 8b7b59908a1a..f66d22de5168 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev) struct platform_device *plat_dev = pci_get_drvdata(pdev); struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev); - platform_device_unregister(plat_dev); clk_unregister(plat_data->pclk); clk_unregister(plat_data->hclk); + platform_device_unregister(plat_dev); } static const struct pci_device_id dev_id_table[] = { -- cgit v1.2.3-59-g8ed1b From 2a48d96fd58a666ae231c3dd6fe4a458798ac645 Mon Sep 17 00:00:00 2001 From: Joakim Zhang Date: Thu, 9 Sep 2021 17:23:22 +0800 Subject: net: stmmac: platform: fix build warning when with !CONFIG_PM_SLEEP Use __maybe_unused for noirq_suspend()/noirq_resume() hooks to avoid build warning with !CONFIG_PM_SLEEP: >> drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c:796:12: error: 'stmmac_pltfr_noirq_resume' defined but not used [-Werror=unused-function] 796 | static int stmmac_pltfr_noirq_resume(struct device *dev) | ^~~~~~~~~~~~~~~~~~~~~~~~~ >> drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c:775:12: error: 'stmmac_pltfr_noirq_suspend' defined but not used [-Werror=unused-function] 775 | static int stmmac_pltfr_noirq_suspend(struct device *dev) | ^~~~~~~~~~~~~~~~~~~~~~~~~~ cc1: all warnings being treated as errors Fixes: 276aae377206 ("net: stmmac: fix system hang caused by eee_ctrl_timer during suspend/resume") Reported-by: kernel test robot Signed-off-by: Joakim Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 4885f9ad1b1e..62cec9bfcd33 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -772,7 +772,7 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev) return stmmac_bus_clks_config(priv, true); } -static int stmmac_pltfr_noirq_suspend(struct device *dev) +static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -793,7 +793,7 @@ static int stmmac_pltfr_noirq_suspend(struct device *dev) return 0; } -static int stmmac_pltfr_noirq_resume(struct device *dev) +static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); -- cgit v1.2.3-59-g8ed1b From 415446185b939dcdcd6a483e705c805ab961e54c Mon Sep 17 00:00:00 2001 From: Íñigo Huguet Date: Thu, 9 Sep 2021 11:28:45 +0200 Subject: sfc: fallback for lack of xdp tx queues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If there are not enough resources to allocate one TX queue per core for XDP TX it was completely disabled. This patch implements a fallback solution for sharing the available queues using __netif_tx_lock for synchronization. In the normal case that there is one TX queue per CPU, no locking is done, as it was before. 
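A sketch of the sharing scheme described above (push_frames_to_hw() is a
hypothetical placeholder; HARD_TX_LOCK()/HARD_TX_UNLOCK() are the standard
netdev TX-lock helpers):

    #include <linux/netdevice.h>
    #include <linux/smp.h>
    #include <net/xdp.h>

    static int push_frames_to_hw(struct netdev_queue *txq,
    			     struct xdp_frame **frames, int n)
    {
    	return n;	/* hypothetical: post descriptors, return frames sent */
    }

    static int xdp_tx_maybe_shared(struct net_device *dev, struct netdev_queue *txq,
    			       bool dedicated, struct xdp_frame **frames, int n)
    {
    	int cpu = raw_smp_processor_id();
    	int sent;

    	if (!dedicated)
    		HARD_TX_LOCK(dev, txq, cpu);	/* other CPUs may use this queue */

    	sent = push_frames_to_hw(txq, frames, n);

    	if (!dedicated)
    		HARD_TX_UNLOCK(dev, txq);

    	return sent;
    }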
With this fallback solution, XDP TX will work in much more cases that were failing, specially in machines with many CPUs. It's hard for XDP users to know what features are supported across different NICs and configurations, so they will benefit on having wider support. Signed-off-by: Íñigo Huguet Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx_channels.c | 54 ++++++++++++++++++++++++++------- drivers/net/ethernet/sfc/net_driver.h | 8 +++++ drivers/net/ethernet/sfc/tx.c | 21 ++++++++----- 3 files changed, 64 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index e5b0d795c301..e7849f1260a1 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * We need a channel per event queue, plus a VI per tx queue. * This may be more pessimistic than it needs to be. */ - if (n_channels + n_xdp_ev > max_channels) { + if (n_channels >= max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DISABLED; netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", n_xdp_ev, n_channels, max_channels); netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface"); - efx->n_xdp_channels = 0; - efx->xdp_tx_per_channel = 0; - efx->xdp_tx_queue_count = 0; + "XDP_TX and XDP_REDIRECT will not work on this interface\n"); } else if (n_channels + n_xdp_tx > efx->max_vis) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DISABLED; netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", n_xdp_tx, n_channels, efx->max_vis); netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface"); - efx->n_xdp_channels = 0; - efx->xdp_tx_per_channel = 0; - efx->xdp_tx_queue_count = 0; + "XDP_TX and XDP_REDIRECT will not work on this interface\n"); + } else if (n_channels + n_xdp_ev > max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + + n_xdp_ev = max_channels - n_channels; + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n", + DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev)); } else { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; + } + + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DISABLED) { efx->n_xdp_channels = n_xdp_ev; efx->xdp_tx_per_channel = tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; n_channels += n_xdp_ev; netif_dbg(efx, drv, efx->net_dev, "Allocating %d TX and %d event queues for XDP\n", - n_xdp_tx, n_xdp_ev); + n_xdp_ev * tx_per_ev, n_xdp_ev); + } else { + efx->n_xdp_channels = 0; + efx->xdp_tx_per_channel = 0; + efx->xdp_tx_queue_count = 0; } if (vec_count < n_channels) { @@ -921,7 +935,25 @@ int efx_set_channels(struct efx_nic *efx) } } } - WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have less XDP TX queues than CPUs, assign the already existing + * queues to the exceeding CPUs (this means 
that we will have to use + * locking when transmitting with XDP) + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + channel = tx_queue->channel; + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + + efx->xdp_tx_queues[xdp_queue_number++] = tx_queue; + } rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9b4b25704271..e731c766f709 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -782,6 +782,12 @@ struct efx_async_filter_insertion { #define EFX_RPS_MAX_IN_FLIGHT 8 #endif /* CONFIG_RFS_ACCEL */ +enum efx_xdp_tx_queues_mode { + EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */ + EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */ + EFX_XDP_TX_QUEUES_DISABLED /* xdp tx not available */ +}; + /** * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) @@ -820,6 +826,7 @@ struct efx_async_filter_insertion { * should be allocated for this NIC * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues. * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit. + * @xdp_txq_queues_mode: XDP TX queues sharing strategy. * @rxq_entries: Size of receive queues requested by user. * @txq_entries: Size of transmit queues requested by user. * @txq_stop_thresh: TX queue fill level at or above which we stop it. @@ -979,6 +986,7 @@ struct efx_nic { unsigned int xdp_tx_queue_count; struct efx_tx_queue **xdp_tx_queues; + enum efx_xdp_tx_queues_mode xdp_txq_queues_mode; unsigned rxq_entries; unsigned txq_entries; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 0c6650d2e239..c7afd6cda902 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -430,21 +430,23 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, int cpu; int i; - cpu = raw_smp_processor_id(); + if (unlikely(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DISABLED)) + return -EINVAL; + if (unlikely(n && !xdpfs)) + return -EINVAL; + if (unlikely(!n)) + return 0; - if (!efx->xdp_tx_queue_count || - unlikely(cpu >= efx->xdp_tx_queue_count)) + cpu = raw_smp_processor_id(); + if (unlikely(cpu >= efx->xdp_tx_queue_count)) return -EINVAL; tx_queue = efx->xdp_tx_queues[cpu]; if (unlikely(!tx_queue)) return -EINVAL; - if (unlikely(n && !xdpfs)) - return -EINVAL; - - if (!n) - return 0; + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); /* Check for available space. We should never need multiple * descriptors per frame. @@ -484,6 +486,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (flush && i > 0) efx_nic_push_buffers(tx_queue); + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); + return i == 0 ? 
-EIO : i; } -- cgit v1.2.3-59-g8ed1b From 6215b608a8c4d4a478721e14a6faa0dc56e4a693 Mon Sep 17 00:00:00 2001 From: Íñigo Huguet Date: Thu, 9 Sep 2021 11:28:46 +0200 Subject: sfc: last resort fallback for lack of xdp tx queues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous patch addressed the situation of having some free resources for xdp tx but not enough for one tx queue per CPU. This patch address the worst case of not having resources at all for xdp tx. Instead of using queues dedicated to xdp, normal queues used by network stack are shared for both cases, using __netif_tx_lock for synchronization. Also queue stop/restart must be considered in the xdp path to avoid freezing the queue. This is not the ideal situation we might want to be, and a performance penalty is expected both for normal and xdp traffic, but at least XDP will work in all possible situations (with a warning in the logs), improving a bit the pain of not knowing in what situations we can use it and in what situations we cannot. Signed-off-by: Íñigo Huguet Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx_channels.c | 82 ++++++++++++++++++--------------- drivers/net/ethernet/sfc/net_driver.h | 2 +- drivers/net/ethernet/sfc/tx.c | 14 ++++-- 3 files changed, 58 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index e7849f1260a1..3dbea028b325 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -167,19 +167,19 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * This may be more pessimistic than it needs to be. */ if (n_channels >= max_channels) { - efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DISABLED; - netif_err(efx, drv, efx->net_dev, - "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", - n_xdp_ev, n_channels, max_channels); - netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface\n"); + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); } else if (n_channels + n_xdp_tx > efx->max_vis) { - efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DISABLED; - netif_err(efx, drv, efx->net_dev, - "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", - n_xdp_tx, n_channels, efx->max_vis); - netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface\n"); + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", + n_xdp_tx, n_channels, efx->max_vis); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); } else if (n_channels + n_xdp_ev > max_channels) { efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; netif_warn(efx, drv, efx->net_dev, @@ -194,7 +194,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; } - if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DISABLED) { + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { efx->n_xdp_channels = n_xdp_ev; efx->xdp_tx_per_channel = 
tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; @@ -205,7 +205,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, } else { efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; - efx->xdp_tx_queue_count = 0; + efx->xdp_tx_queue_count = n_xdp_tx; } if (vec_count < n_channels) { @@ -872,6 +872,20 @@ rollback: goto out; } +static inline int +efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if (xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + int efx_set_channels(struct efx_nic *efx) { struct efx_tx_queue *tx_queue; @@ -910,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx) if (efx_channel_is_xdp_tx(channel)) { efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->queue = next_queue++; - - /* We may have a few left-over XDP TX - * queues owing to xdp_tx_queue_count - * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. - * We still allocate and probe those - * TXQs, but never use them. - */ - if (xdp_queue_number < efx->xdp_tx_queue_count) { - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) xdp_queue_number++; - } } } else { efx_for_each_channel_tx_queue(tx_queue, channel) { @@ -932,6 +935,17 @@ int efx_set_channels(struct efx_nic *efx) channel->channel, tx_queue->label, tx_queue->queue); } + + /* If XDP is borrowing queues from net stack, it must use the queue + * with no csum offload, which is the first one of the channel + * (note: channel->tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } } } } @@ -940,19 +954,15 @@ int efx_set_channels(struct efx_nic *efx) WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && xdp_queue_number > efx->xdp_tx_queue_count); - /* If we have less XDP TX queues than CPUs, assign the already existing - * queues to the exceeding CPUs (this means that we will have to use - * locking when transmitting with XDP) + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs */ next_queue = 0; while (xdp_queue_number < efx->xdp_tx_queue_count) { tx_queue = efx->xdp_tx_queues[next_queue++]; - channel = tx_queue->channel; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); - - efx->xdp_tx_queues[xdp_queue_number++] = tx_queue; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; } rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index e731c766f709..f6981810039d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -785,7 +785,7 @@ struct efx_async_filter_insertion { enum efx_xdp_tx_queues_mode { EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */ 
EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */ - EFX_XDP_TX_QUEUES_DISABLED /* xdp tx not available */ + EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */ }; /** diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index c7afd6cda902..d16e031e95f4 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -428,10 +428,8 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, unsigned int len; int space; int cpu; - int i; + int i = 0; - if (unlikely(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DISABLED)) - return -EINVAL; if (unlikely(n && !xdpfs)) return -EINVAL; if (unlikely(!n)) @@ -448,6 +446,15 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); + /* If we're borrowing net stack queues we have to handle stop-restart + * or we might block the queue and it will be considered as frozen + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + if (netif_tx_queue_stopped(tx_queue->core_txq)) + goto unlock; + efx_tx_maybe_stop_queue(tx_queue); + } + /* Check for available space. We should never need multiple * descriptors per frame. */ @@ -486,6 +493,7 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (flush && i > 0) efx_nic_push_buffers(tx_queue); +unlock: if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); -- cgit v1.2.3-59-g8ed1b From e011912651bdf72840d88e8a8de3716bbcc4be99 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 8 Sep 2021 21:49:53 -0700 Subject: net: ni65: Avoid typecast of pointer to u32 Building alpha:allmodconfig results in the following error. drivers/net/ethernet/amd/ni65.c: In function 'ni65_stop_start': drivers/net/ethernet/amd/ni65.c:751:37: error: cast from pointer to integer of different size buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); 'buffer[]' is declared as unsigned long, so replace the typecast to u32 with a typecast to unsigned long to fix the problem. Cc: Arnd Bergmann Signed-off-by: Guenter Roeck Acked-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/ni65.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index b5df7ad5a83f..032e8922b482 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p) #ifdef XMT_VIA_SKB skb_save[i] = p->tmd_skb[i]; #endif - buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); + buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer); blen[i] = tmdp->blen; tmdp->u.s.status = 0x0; } -- cgit v1.2.3-59-g8ed1b From bfe84435090a6c85271b02a42b1d83fef9ff7cc7 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Thu, 9 Sep 2021 08:12:23 -0700 Subject: ice: Correctly deal with PFs that do not support RDMA There are two cases where the current PF does not support RDMA functionality. The first is if the NVM loaded on the device is set to not support RDMA (common_caps.rdma is false). The second is if the kernel bonding driver has included the current PF in an active link aggregate. When the driver has determined that this PF does not support RDMA, then auxiliary devices should not be created on the auxiliary bus. 
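A minimal sketch of the gating idea (struct and helper names here are
illustrative, not the ice driver's):

    struct example_pf {
    	bool rdma_capable;	/* false if NVM disables RDMA or PF is in a LAG */
    };

    static int example_create_aux_dev(struct example_pf *pf)
    {
    	return 0;	/* hypothetical: allocate and add the auxiliary device */
    }

    static int example_plug_aux_dev(struct example_pf *pf)
    {
    	if (!pf->rdma_capable)
    		return 0;	/* gracefully skip instead of creating the device */

    	return example_create_aux_dev(pf);
    }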
Without a device on the auxiliary bus, even if the irdma driver is present, there will be no RDMA activity attempted on this PF. Currently, in the reset flow, an attempt to create auxiliary devices is performed without regard to the ability of the PF. There needs to be a check in ice_aux_plug_dev (as the central point that creates auxiliary devices) to see if the PF is in a state to support the functionality. When disabling and re-enabling RDMA due to the inclusion/removal of the PF in a link aggregate, we also need to set/clear the bit which controls auxiliary device creation so that a reset recovery in a link aggregate situation doesn't try to create auxiliary devices when it shouldn't. Fixes: f9f5301e7e2d ("ice: Register auxiliary device to provide RDMA") Reported-by: Yongxin Liu Signed-off-by: Dave Ertman Signed-off-by: Tony Nguyen Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ice/ice.h | 2 ++ drivers/net/ethernet/intel/ice/ice_idc.c | 6 ++++++ 2 files changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index eadcb9958346..3c4f08d20414 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) { if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { set_bit(ICE_FLAG_RDMA_ENA, pf->flags); + set_bit(ICE_FLAG_AUX_ENA, pf->flags); ice_plug_aux_dev(pf); } } @@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { ice_unplug_aux_dev(pf); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + clear_bit(ICE_FLAG_AUX_ENA, pf->flags); } #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 1f2afdf6cd48..adcc9a251595 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf) struct auxiliary_device *adev; int ret; + /* if this PF doesn't support a technology that requires auxiliary + * devices, then gracefully exit + */ + if (!ice_is_aux_ena(pf)) + return 0; + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); if (!iadev) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From e3f0cc1a945fcefec0c7c9d9dfd028a51daa1846 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 9 Sep 2021 10:33:28 -0700 Subject: r6040: Restore MDIO clock frequency after MAC reset A number of users have reported that they were not able to get the PHY to successfully link up, especially after commit c36757eb9dee ("net: phy: consider AN_RESTART status when reading link status") where we stopped reading just BMSR, but we also read BMCR to determine the link status. Andrius at NetBSD did a wonderful job at debugging the problem and found out that the MDIO bus clock frequency would be incorrectly set back to its default value which would prevent the MDIO bus controller from reading PHY registers properly. Back when we only read BMSR, if we read all 1s, we could falsely indicate a link status, though in general there is a cable plugged in, so this went unnoticed. After a second read of BMCR was added, a wrong read will lead to the inability to determine a link UP condition which is when it started to be visibly broken, even if it was long before that. The fix consists in restoring the value of the MD_CSR register that was set prior to the MAC reset. 
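A sketch of the save/restore pattern around the MAC reset (the register
offsets and reset helper below are placeholders, not the r6040
definitions):

    #include <linux/io.h>

    #define EXAMPLE_MDIO_CLK_REG	0xb6	/* placeholder offset */
    #define EXAMPLE_MDIO_CLK_DEF	0x0030	/* placeholder reset default */

    static void example_mac_reset(void __iomem *ioaddr)
    {
    	/* hypothetical: issue the MAC reset, which clears the MDIO divider */
    }

    static void example_reset_mac_keep_mdio_clock(void __iomem *ioaddr)
    {
    	u16 saved = ioread16(ioaddr + EXAMPLE_MDIO_CLK_REG);

    	example_mac_reset(ioaddr);

    	if (saved != EXAMPLE_MDIO_CLK_DEF)
    		iowrite16(saved, ioaddr + EXAMPLE_MDIO_CLK_REG);
    }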
Link: http://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=53494 Fixes: 90f750a81a29 ("r6040: consolidate MAC reset to its own function") Reported-by: Andrius V Reported-by: Darek Strugacz Tested-by: Darek Strugacz Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/rdc/r6040.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 4b2eca5e08e2..01ef5efd7bc2 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -119,6 +119,8 @@ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ #define MAC_SM_RST 0x0002 /* MAC status machine reset */ +#define MD_CSC 0xb6 /* MDC speed control register */ +#define MD_CSC_DEFAULT 0x0030 #define MAC_ID 0xBE /* Identifier register */ #define TX_DCNT 0x80 /* TX descriptor count */ @@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp) { void __iomem *ioaddr = lp->base; int limit = MAC_DEF_TIMEOUT; - u16 cmd; + u16 cmd, md_csc; + md_csc = ioread16(ioaddr + MD_CSC); iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); @@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp) iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); + + /* Restore MDIO clock frequency */ + if (md_csc != MD_CSC_DEFAULT) + iowrite16(md_csc, ioaddr + MD_CSC); } static void r6040_init_mac_regs(struct net_device *dev) -- cgit v1.2.3-59-g8ed1b From 20e100f52730cd0db609e559799c1712b5f27582 Mon Sep 17 00:00:00 2001 From: Shai Malin Date: Fri, 10 Sep 2021 11:33:56 +0300 Subject: qed: Handle management FW error Handle MFW (management FW) error response in order to avoid a crash during recovery flows. Changes from v1: - Add "Fixes tag". Fixes: tag 5e7ba042fd05 ("qed: Fix reading stale configuration information") Signed-off-by: Ariel Elior Signed-off-by: Shai Malin Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6e5a6cc97d0e..24cd41567775 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att) { enum nvm_image_type type; + int rc; u32 i; /* Translate image_id into MFW definitions */ @@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, return -EINVAL; } - qed_mcp_nvm_info_populate(p_hwfn); + rc = qed_mcp_nvm_info_populate(p_hwfn); + if (rc) + return rc; + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; -- cgit v1.2.3-59-g8ed1b From 666eb96d85dcbc93aacc186a037db2e05b92b9f5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 10 Sep 2021 12:15:11 +0100 Subject: qlcnic: Remove redundant initialization of variable ret The variable ret is being initialized with a value that is never read, it is being updated later on. The assignment is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 0a2f34fc8b24..27dffa299ca6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; const struct firmware *fw = fw_info->fw; u32 dest, *p_cache, *temp; - int i, ret = -EIO; __le32 *temp_le; u8 data[16]; size_t size; + int i, ret; u64 addr; temp = vzalloc(fw->size); -- cgit v1.2.3-59-g8ed1b From 08dad2f4d541fcfe5e7bfda72cc6314bbfd2802f Mon Sep 17 00:00:00 2001 From: Jesper Nilsson Date: Fri, 10 Sep 2021 21:55:34 +0200 Subject: net: stmmac: allow CSR clock of 300MHz The Synopsys Ethernet IP uses the CSR clock as a base clock for MDC. The divisor used is set in the MAC_MDIO_Address register field CR (Clock Rate) The divisor is there to change the CSR clock into a clock that falls below the IEEE 802.3 specified max frequency of 2.5MHz. If the CSR clock is 300MHz, the code falls back to using the reset value in the MAC_MDIO_Address register, as described in the comment above this code. However, 300MHz is actually an allowed value and the proper divider can be estimated quite easily (it's just 1Hz difference!) A CSR frequency of 300MHz with the maximum clock rate value of 0x5 (STMMAC_CSR_250_300M, a divisor of 124) gives somewhere around ~2.42MHz which is below the IEEE 802.3 specified maximum. For the ARTPEC-8 SoC, the CSR clock is this problematic 300MHz, and unfortunately, the reset-value of the MAC_MDIO_Address CR field is 0x0. This leads to a clock rate of zero and a divisor of 42, and gives an MDC frequency of ~7.14MHz. Allow CSR clock of 300MHz by making the comparison inclusive. Signed-off-by: Jesper Nilsson Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 246f84fedbc8..553c4403258a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; - else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } -- cgit v1.2.3-59-g8ed1b From ce062a0adbfe933b1932235fdfd874c4c91d1bb0 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Sat, 11 Sep 2021 17:50:09 +0200 Subject: net: dsa: qca8k: fix kernel panic with legacy mdio mapping When the mdio legacy mapping is used the mii_bus priv registered by DSA refer to the dsa switch struct instead of the qca8k_priv struct and causes a kernel panic. Create dedicated function when the internal dedicated mdio driver is used to properly handle the 2 different implementation. Fixes: 759bafb8a322 ("net: dsa: qca8k: add support for internal phy and internal mdio") Signed-off-by: Ansuel Smith Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca8k.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 1f63f50f73f1..bda5a9bf4f52 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) } static int -qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) +qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data) { - struct qca8k_priv *priv = salve_bus->priv; - struct mii_bus *bus = priv->bus; u16 r1, r2, page; u32 val; int ret; @@ -682,10 +680,8 @@ exit: } static int -qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) +qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum) { - struct qca8k_priv *priv = salve_bus->priv; - struct mii_bus *bus = priv->bus; u16 r1, r2, page; u32 val; int ret; @@ -726,6 +722,24 @@ exit: return ret; } +static int +qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) +{ + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; + + return qca8k_mdio_write(bus, phy, regnum, data); +} + +static int +qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) +{ + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; + + return qca8k_mdio_read(bus, phy, regnum); +} + static int qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) { @@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio) bus->priv = (void *)priv; bus->name = "qca8k slave mii"; - bus->read = qca8k_mdio_read; - bus->write = qca8k_mdio_write; + bus->read = qca8k_internal_mdio_read; + bus->write = qca8k_internal_mdio_write; snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ds->index); -- cgit v1.2.3-59-g8ed1b From f11ee2ad25b22c2ee587045dd6999434375532f7 Mon Sep 17 00:00:00 2001 From: Len Baker Date: Sat, 11 Sep 2021 12:28:18 +0200 Subject: net: mana: Prefer struct_size over open coded arithmetic As noted in the "Deprecated Interfaces, Language Features, Attributes, and Conventions" documentation [1], size calculations (especially multiplication) should not be performed in memory allocator (or similar) function arguments due to the risk of them overflowing. This could lead to values wrapping around and a smaller allocation being made than the caller was expecting. Using those allocations could lead to linear overflows of heap memory and other misbehaviors. So, use the struct_size() helper to do the arithmetic instead of the argument "size + count * size" in the kzalloc() function. [1] https://www.kernel.org/doc/html/v5.14/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments Signed-off-by: Len Baker Reviewed-by: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microsoft/mana/hw_channel.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index c1310ea1c216..d5c485a6d284 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, int err; u16 i; - dma_buf = kzalloc(sizeof(*dma_buf) + - q_depth * sizeof(struct hwc_work_request), - GFP_KERNEL); + dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL); if (!dma_buf) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From eca4cf12acda306f851f6d2a05b1c9ef62cf0e81 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 12 Sep 2021 12:34:47 -0400 Subject: bnxt_en: Fix error recovery regression The recent patch has introduced a regression by not reading the reset count in the ERROR_RECOVERY async event handler. We may have just gone through a reset and the reset count has just incremented. If we don't update the reset count in the ERROR_RECOVERY event handler, the health check timer will see that the reset count has changed and will initiate an unintended reset. Restore the unconditional update of the reset count in bnxt_async_event_process() if error recovery watchdog is enabled. Also, update the reset count at the end of the reset sequence to make it even more robust. Fixes: 1b2b91831983 ("bnxt_en: Fix possible unintended driver initiated error recovery") Reviewed-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9b86516e59a1..8b0a2ae1367c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp, DIV_ROUND_UP(fw_health->polling_dsecs * HZ, bp->current_interval * 10); fw_health->tmr_counter = fw_health->tmr_multiplier; - if (!fw_health->enabled) { + if (!fw_health->enabled) fw_health->last_fw_heartbeat = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); - fw_health->last_fw_reset_cnt = - bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - } + fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); netif_info(bp, drv, bp->dev, "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", fw_health->master, fw_health->last_fw_reset_cnt, @@ -12207,6 +12206,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) return; } + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && + bp->fw_health->enabled) { + bp->fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + } bp->fw_reset_state = 0; /* Make sure fw_reset_state is 0 before clearing the flag */ smp_mb__before_atomic(); -- cgit v1.2.3-59-g8ed1b From 1affc01fdc6035189a5ab2a24948c9419ee0ecf2 Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Sun, 12 Sep 2021 12:34:48 -0400 Subject: bnxt_en: make bnxt_free_skbs() safe to call after bnxt_free_mem() The call to bnxt_free_mem(..., false) in the bnxt_half_open_nic() error path will deallocate ring descriptor memory via bnxt_free_?x_rings(), but because irq_re_init is false, the ring info itself is not freed. 
To simplify error paths, deallocation functions have generally been written to be safe when called on unallocated memory. It should always be safe to call dev_close(), which calls bnxt_free_skbs() a second time, even in this semi- allocated ring state. Calling bnxt_free_skbs() a second time with the rings already freed will cause NULL pointer dereference. Fix it by checking the rings are valid before proceeding in bnxt_free_tx_skbs() and bnxt_free_one_rx_ring_skbs(). Fixes: 975bc99a4a39 ("bnxt_en: Refactor bnxt_free_rx_skbs().") Signed-off-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8b0a2ae1367c..9f9806f1c0fc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2729,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; int j; + if (!txr->tx_buf_ring) + continue; + for (j = 0; j < max_idx;) { struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; struct sk_buff *skb; @@ -2813,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) } skip_rx_tpa_free: + if (!rxr->rx_buf_ring) + goto skip_rx_buf_free; + for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; dma_addr_t mapping = rx_buf->mapping; @@ -2835,6 +2841,11 @@ skip_rx_tpa_free: kfree(data); } } + +skip_rx_buf_free: + if (!rxr->rx_agg_ring) + goto skip_rx_agg_free; + for (i = 0; i < max_agg_idx; i++) { struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; struct page *page = rx_agg_buf->page; @@ -2851,6 +2862,8 @@ skip_rx_tpa_free: __free_page(page); } + +skip_rx_agg_free: if (rxr->rx_page) { __free_page(rxr->rx_page); rxr->rx_page = NULL; -- cgit v1.2.3-59-g8ed1b From 985941e1dd5e996311c29688ca0d3aa1ff8eb0b6 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 12 Sep 2021 12:34:49 -0400 Subject: bnxt_en: Clean up completion ring page arrays completely We recently changed the completion ring page arrays to be dynamically allocated to better support the expanded range of ring depths. The cleanup path for this was not quite complete. It might cause the shutdown path to crash if we need to abort before the completion ring arrays have been allocated and initialized. Fix it by initializing the ring_mem->pg_arr to NULL after freeing the completion ring page array. Add a check in bnxt_free_ring() to skip referencing the rmem->pg_arr if it is NULL. Fixes: 03c7448790b8 ("bnxt_en: Don't use static arrays for completion ring pages") Reviewed-by: Andy Gospodarek Reviewed-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9f9806f1c0fc..f32431a7e5a6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2912,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) struct pci_dev *pdev = bp->pdev; int i; + if (!rmem->pg_arr) + goto skip_pages; + for (i = 0; i < rmem->nr_pages; i++) { if (!rmem->pg_arr[i]) continue; @@ -2921,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) rmem->pg_arr[i] = NULL; } +skip_pages: if (rmem->pg_tbl) { size_t pg_tbl_size = rmem->nr_pages * 8; @@ -3240,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) { + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + kfree(cpr->cp_desc_ring); cpr->cp_desc_ring = NULL; + ring->ring_mem.pg_arr = NULL; kfree(cpr->cp_desc_mapping); cpr->cp_desc_mapping = NULL; + ring->ring_mem.dma_arr = NULL; } static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) -- cgit v1.2.3-59-g8ed1b From 111b64e35ea03d58c882832744f571a88bb2e2e2 Mon Sep 17 00:00:00 2001 From: Aleksander Jan Bajkowski Date: Sun, 12 Sep 2021 13:58:07 +0200 Subject: net: dsa: lantiq_gswip: Add 200ms assert delay The delay is especially needed by the xRX300 and xRX330 SoCs. Without this patch, some phys are sometimes not properly detected. The patch was tested on BT Home Hub 5A and D-Link DWR-966. Fixes: a09d042b0862 ("net: dsa: lantiq: allow to use all GPHYs on xRX300 and xRX330") Signed-off-by: Aleksander Jan Bajkowski Signed-off-by: Martin Blumenstingl Acked-by: Hauke Mehrtens Signed-off-by: David S. Miller --- drivers/net/dsa/lantiq_gswip.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 64d6dfa83122..267324889dd6 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph reset_control_assert(gphy_fw->reset); + /* The vendor BSP uses a 200ms delay after asserting the reset line. + * Without this some users are observing that the PHY is not coming up + * on the MDIO bus. + */ + msleep(200); + ret = request_firmware(&fw, gphy_fw->fw_name, dev); if (ret) { dev_err(dev, "failed to load firmware: %s, error: %i\n", -- cgit v1.2.3-59-g8ed1b From f7ec554b73c5239a96afb9a9c3eb18cb11f539b7 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 13 Sep 2021 21:08:20 +0800 Subject: net: hns3: add option to turn off page pool feature When page pool is added to the hns3 driver, it is always enabled unconditionally, which means spilt page handling in the hns3 driver is dead code. As there is a requirement to test the performance between spilt page handling in driver and page pool, so add a module param to support disabling the page pool. When the page pool is proved to perform better in most case, the spilt page handling in driver can be removed. Fixes: 93188e9642c3 ("net: hns3: support skb's frag page recycling based on page pool") Signed-off-by: Yunsheng Lin Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 22af3d6ce178..293243bbe407 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1; module_param(tx_sgl, uint, 0600); MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); +static bool page_pool_enabled = true; +module_param(page_pool_enabled, bool, 0400); + #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ sizeof(struct sg_table)) #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ @@ -4753,7 +4756,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) goto out_with_desc_cb; if (!HNAE3_IS_TX_RING(ring)) { - hns3_alloc_page_pool(ring); + if (page_pool_enabled) + hns3_alloc_page_pool(ring); ret = hns3_alloc_ring_buffers(ring); if (ret) -- cgit v1.2.3-59-g8ed1b From d18e81183b1cb9c309266cbbce9acd3e0c528d04 Mon Sep 17 00:00:00 2001 From: Yufeng Mo Date: Mon, 13 Sep 2021 21:08:21 +0800 Subject: net: hns3: pad the short tunnel frame before sending to hardware The hardware cannot handle short tunnel frames below 65 bytes, and will cause vlan tag missing problem. So pads packet size to 65 bytes for tunnel frames to fix this bug. Fixes: 3db084d28dc0("net: hns3: Fix for vxlan tx checksum bug") Signed-off-by: Yufeng Mo Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 293243bbe407..adc54a726661 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -76,6 +76,7 @@ module_param(page_pool_enabled, bool, 0400); #define HNS3_OUTER_VLAN_TAG 2 #define HNS3_MIN_TX_LEN 33U +#define HNS3_MIN_TUN_PKT_LEN 65U /* hns3_pci_tbl - PCI Device ID Table * @@ -1427,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, l4.tcp->doff); break; case IPPROTO_UDP: - if (hns3_tunnel_csum_bug(skb)) - return skb_checksum_help(skb); + if (hns3_tunnel_csum_bug(skb)) { + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); + + return ret ? ret : skb_checksum_help(skb); + } hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, -- cgit v1.2.3-59-g8ed1b From 1dc839ec09d3ab2a4156dc98328b8bc3586f2b70 Mon Sep 17 00:00:00 2001 From: Yufeng Mo Date: Mon, 13 Sep 2021 21:08:22 +0800 Subject: net: hns3: change affinity_mask to numa node range Currently, affinity_mask is set to a single cpu. As a result, irqbalance becomes invalid in SUBSET or EXACT mode. To solve this problem, change affinity_mask to numa node range. In this way, irqbalance can be performed on the cpu of the numa node. Fixes: 0812545487ec ("net: hns3: add interrupt affinity support for misc interrupt") Signed-off-by: Yufeng Mo Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e55ba2e511b1..aed97c934bfb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; unsigned int i; - int ret; + int node, ret; ret = hclge_get_cfg(hdev, &cfg); if (ret) @@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_kdump_kernel_config(hdev); - /* Set the init affinity based on pci func number */ - i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); - i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0; - cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), - &hdev->affinity_mask); + /* Set the affinity based on numa node */ + node = dev_to_node(&hdev->pdev->dev); + if (node != NUMA_NO_NODE) + cpumask = cpumask_of_node(node); + + cpumask_copy(&hdev->affinity_mask, cpumask); return ret; } -- cgit v1.2.3-59-g8ed1b From b81d8948746520f989e86d66292ff72b5056114a Mon Sep 17 00:00:00 2001 From: Yufeng Mo Date: Mon, 13 Sep 2021 21:08:23 +0800 Subject: net: hns3: disable mac in flr process The firmware will not disable mac in flr process. Therefore, the driver needs to proactively disable mac during flr, which is the same as the function reset. Fixes: 35d93a30040c ("net: hns3: adjust the process of PF reset") Signed-off-by: Yufeng Mo Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index aed97c934bfb..f1e46ba799f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -8127,11 +8127,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle) hclge_clear_arfs_rules(hdev); spin_unlock_bh(&hdev->fd_rule_lock); - /* If it is not PF reset, the firmware will disable the MAC, + /* If it is not PF reset or FLR, the firmware will disable the MAC, * so it only need to stop phy here. */ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && - hdev->reset_type != HNAE3_FUNC_RESET) { + hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { hclge_mac_stop_phy(hdev); hclge_update_link_status(hdev); return; -- cgit v1.2.3-59-g8ed1b From 472430a7b066f19afa1b55867d621b2d6d323e0d Mon Sep 17 00:00:00 2001 From: Jiaran Zhang Date: Mon, 13 Sep 2021 21:08:24 +0800 Subject: net: hns3: fix the exception when query imp info When the command for querying imp info is issued to the firmware, if the firmware does not support the command, the returned value of bd num is 0. Add protection mechanism before alloc memory to prevent apply for 0-length memory. Fixes: 0b198b0d80ea ("net: hns3: refactor dump m7 info of debugfs") Signed-off-by: Jiaran Zhang Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 68ed1715ac52..87d96f82c318 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) } bd_num = le32_to_cpu(req->bd_num); + if (!bd_num) { + dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); + return -EINVAL; + } desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); if (!desc_src) -- cgit v1.2.3-59-g8ed1b From 427900d27d86b820c559037a984bd403f910860f Mon Sep 17 00:00:00 2001 From: Jiaran Zhang Date: Mon, 13 Sep 2021 21:08:25 +0800 Subject: net: hns3: fix the timing issue of VF clearing interrupt sources Currently, the VF does not clear the interrupt source immediately after receiving an interrupt. If a second interrupt is triggered while the first one is still being processed, clearing the interrupt source before exiting clears the sources of both events at the same time, so no interrupt is raised for the second one. The VF detects the missed message only when the next interrupt is generated. Clearing the interrupt source immediately after executing check_evt_cause ensures that: 1. Even if two interrupt events are triggered at the same time, both can be processed. 2. If the second event arrives while the first one is being handled and its source has not been cleared yet, the interrupt is reported again once vector0 is re-enabled. Fixes: b90fcc5bd904 ("net: hns3: add reset handling for VF when doing Core/Global/IMP reset") Signed-off-by: Jiaran Zhang Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 82e727020120..a69e892277b3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) hclgevf_enable_vector(&hdev->misc_vector, false); event_cause = hclgevf_check_evt_cause(hdev, &clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) + hclgevf_clear_event_cause(hdev, clearval); switch (event_cause) { case HCLGEVF_VECTOR0_EVENT_RST: @@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) break; } - if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { - hclgevf_clear_event_cause(hdev, clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) hclgevf_enable_vector(&hdev->misc_vector, true); - } return IRQ_HANDLED; } -- cgit v1.2.3-59-g8ed1b From 52ce14c134a003fee03d8fc57442c05a55b53715 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sun, 12 Sep 2021 22:05:23 +0300 Subject: bnx2x: Fix enabling network interfaces without VFs This function is called to enable SR-IOV when available; failing to bring up interfaces without VFs was a regression.
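In code terms, a zero VF count has to be treated as a benign early exit rather than a probe failure. A sketch of the intended branch in bnx2x_iov_init_one(), assuming (as the hunk below implies) that the failed label cleans up and returns err:

	/* SR-IOV capability present but no VFs configured: not an error for
	 * this NIC, so keep err at 0 and just skip the IOV setup.
	 */
	if (iov->total == 0) {
		err = 0;
		goto failed;
	}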
Fixes: 65161c35554f ("bnx2x: Fix missing error code in bnx2x_iov_init_one()") Signed-off-by: Adrian Bunk Reported-by: YunQiang Su Tested-by: YunQiang Su Cc: stable@vger.kernel.org Acked-by: Shai Malin Link: https://lore.kernel.org/r/20210912190523.27991-1-bunk@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index f255fd0b16db..6fbf735fca31 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* SR-IOV capability was enabled but there are no VFs*/ if (iov->total == 0) { - err = -EINVAL; + err = 0; goto failed; } -- cgit v1.2.3-59-g8ed1b From 7366c23ff492ad260776a3ee1aaabba9fc773a8b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 13 Sep 2021 15:06:05 -0700 Subject: ptp: dp83640: don't define PAGE0 Building dp83640.c on arch/parisc/ produces a build warning for PAGE0 being redefined. Since the macro is not used in the dp83640 driver, just make it a comment for documentation purposes. In file included from ../drivers/net/phy/dp83640.c:23: ../drivers/net/phy/dp83640_reg.h:8: warning: "PAGE0" redefined 8 | #define PAGE0 0x0000 from ../drivers/net/phy/dp83640.c:11: ../arch/parisc/include/asm/page.h:187: note: this is the location of the previous definition 187 | #define PAGE0 ((struct zeropage *)__PAGE_OFFSET) Fixes: cb646e2b02b2 ("ptp: Added a clock driver for the National Semiconductor PHYTER.") Signed-off-by: Randy Dunlap Reported-by: Geert Uytterhoeven Cc: Richard Cochran Cc: John Stultz Cc: Heiner Kallweit Cc: Russell King Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20210913220605.19682-1-rdunlap@infradead.org Signed-off-by: Jakub Kicinski --- drivers/net/phy/dp83640_reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h index 21aa24c741b9..daae7fa58fb8 100644 --- a/drivers/net/phy/dp83640_reg.h +++ b/drivers/net/phy/dp83640_reg.h @@ -5,7 +5,7 @@ #ifndef HAVE_DP83640_REGISTERS #define HAVE_DP83640_REGISTERS -#define PAGE0 0x0000 +/* #define PAGE0 0x0000 */ #define PHYCR2 0x001c /* PHY Control Register 2 */ #define PAGE4 0x0004 -- cgit v1.2.3-59-g8ed1b From 301de697d869be6564aebeb5ab811c84c0a7abed Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 14 Sep 2021 17:05:15 +0300 Subject: Revert "net: phy: Uniform PHY driver access" This reverts commit 3ac8eed62596387214869319379c1fcba264d8c6, which did more than it said on the box, and not only it replaced to_phy_driver with phydev->drv, but it also removed the "!drv" check, without actually explaining why that is fine. That patch in fact breaks suspend/resume on any system which has PHY devices with no drivers bound. 
The stack trace is: Unable to handle kernel NULL pointer dereference at virtual address 00000000000000e8 pc : mdio_bus_phy_suspend+0xd8/0xec lr : dpm_run_callback+0x38/0x90 Call trace: mdio_bus_phy_suspend+0xd8/0xec dpm_run_callback+0x38/0x90 __device_suspend+0x108/0x3cc dpm_suspend+0x140/0x210 dpm_suspend_start+0x7c/0xa0 suspend_devices_and_enter+0x13c/0x540 pm_suspend+0x2a4/0x330 Examples why that assumption is not fine: - There is an MDIO bus with a PHY device that doesn't have a specific PHY driver loaded, because mdiobus_register() automatically creates a PHY device for it but there is no specific PHY driver in the system. Normally under those circumstances, the generic PHY driver will be bound lazily to it (at phy_attach_direct time). But some Ethernet drivers attach to their PHY at .ndo_open time. Until then it, the to-be-driven-by-genphy PHY device will not have a driver. The blamed patch amounts to saying "you need to open all net devices before the system can suspend, to avoid the NULL pointer dereference". - There is any raw MDIO device which has 'plausible' values in the PHY ID registers 2 and 3, which is located on an MDIO bus whose driver does not set bus->phy_mask = ~0 (which prevents auto-scanning of PHY devices). An example could be a MAC's internal MDIO bus with PCS devices on it, for serial links such as SGMII. PHY devices will get created for those PCSes too, due to that MDIO bus auto-scanning, and although those PHY devices are not used, they do not bother anybody either. PCS devices are usually managed in Linux as raw MDIO devices. Nonetheless, they do not have a PHY driver, nor does anybody attempt to connect to them (because they are not a PHY), and therefore this patch breaks that. The goal itself of the patch is questionable, so I am going for a straight revert. to_phy_driver does not seem to have a need to be replaced by phydev->drv, in fact that might even trigger code paths which were not given too deep of a thought. For instance: phy_probe populates phydev->drv at the beginning, but does not clean it up on any error (including EPROBE_DEFER). So if the phydev driver requests probe deferral, phydev->drv will remain populated despite there being no driver bound. If a system suspend starts in between the initial probe deferral request and the subsequent probe retry, we will be calling the phydev->drv->suspend method, but _before_ any phydev->drv->probe call has succeeded. That is to say, if the phydev->drv is allocating any driver-private data structure in ->probe, it pretty much expects that data structure to be available in ->suspend. But it may not. That is a pretty insane environment to present to PHY drivers. In the code structure before the blamed patch, mdio_bus_phy_may_suspend would just say "no, don't suspend" to any PHY device which does not have a driver pointer _in_the_device_structure_ (not the phydev->drv). That would essentially ensure that ->suspend will never get called for a device that has not yet successfully completed probe. This is the code structure the patch is returning to, via the revert. 
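Concretely, the guard being restored looks roughly like this; it is a paraphrase of the check described above, relying only on the phydev->mdio.dev.driver field and the to_phy_driver() helper visible in the hunk below, with the remaining attached-netdev checks elided:

static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
	struct device_driver *drv = phydev->mdio.dev.driver;
	struct phy_driver *phydrv = to_phy_driver(drv);

	/* No driver bound yet (genphy not attached, PCS-only device, or a
	 * deferred probe): never call a suspend op that does not exist.
	 * The !drv test short-circuits before phydrv is ever dereferenced.
	 */
	if (!drv || !phydrv->suspend)
		return false;

	/* ... remaining checks unchanged ... */
	return true;
}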
Fixes: 3ac8eed62596 ("net: phy: Uniform PHY driver access") Signed-off-by: Vladimir Oltean Acked-by: Florian Fainelli Link: https://lore.kernel.org/r/20210914140515.2311548-1-vladimir.oltean@nxp.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/phy_device.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 9e2891d8e8dd..ba5ad86ec826 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock); static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { + struct device_driver *drv = phydev->mdio.dev.driver; + struct phy_driver *phydrv = to_phy_driver(drv); struct net_device *netdev = phydev->attached_dev; - if (!phydev->drv->suspend) + if (!drv || !phydrv->suspend) return false; /* PHY not attached? May suspend if the PHY has not already been -- cgit v1.2.3-59-g8ed1b From 84fb7dfc7463afcba61281f36535576a7f7b0626 Mon Sep 17 00:00:00 2001 From: Adam Borowski Date: Sun, 12 Sep 2021 23:23:21 +0200 Subject: net: wan: wanxl: define CROSS_COMPILE_M68K It was used but never set. The hardcoded value from before the dawn of time was non-standard; the usual name for cross-tools is $TRIPLET-$TOOL Signed-off-by: Adam Borowski Signed-off-by: David S. Miller --- drivers/net/wan/Makefile | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index f6b92efffc94..480bcd1f6c1c 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o clean-files := wanxlfw.inc $(obj)/wanxl.o: $(obj)/wanxlfw.inc +CROSS_COMPILE_M68K = m68k-linux-gnu- + ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) M68KCC = $(CC) -- cgit v1.2.3-59-g8ed1b From 7c3a0a018e672a9723a79b128227272562300055 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 15 Sep 2021 07:47:27 +0300 Subject: net/{mlx5|nfp|bnxt}: Remove unnecessary RTNL lock assert Remove the assert from the callback priv lookup function since it does not require RTNL lock and is already protected by flow_indr_block_lock. This will avoid warnings from being emitted to dmesg if the driver registers its callback after an ingress qdisc was created for a netdevice. The warnings started after the following patch was merged: commit 74fc4f828769 ("net: Fix offloading indirect devices dependency on qdisc order creation") Signed-off-by: Eli Cohen Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 3 --- drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 3 --- drivers/net/ethernet/netronome/nfp/flower/offload.c | 3 --- 3 files changed, 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 46fae1acbeed..e6a4a768b10b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev) { struct bnxt_flower_indr_block_cb_priv *cb_priv; - /* All callback list access should be protected by RTNL. 
*/ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) if (cb_priv->tunnel_netdev == netdev) return cb_priv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 51a4d80f7fa3..de03684528bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv, { struct mlx5e_rep_indr_block_priv *cb_priv; - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &rpriv->uplink_priv.tc_indr_block_priv_list, list) diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 556c3495211d..64c0ef57ad42 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, struct nfp_flower_indr_block_cb_priv *cb_priv; struct nfp_flower_priv *priv = app->priv; - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) if (cb_priv->netdev == netdev) return cb_priv; -- cgit v1.2.3-59-g8ed1b From 40ee363c844fcb6ae0f1f5cfea68aed7e268c2f4 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 15 Sep 2021 10:19:07 -0700 Subject: igc: fix tunnel offloading Checking tunnel offloading, it turns out that offloading doesn't work as expected. The following script allows to reproduce the issue. Call it as `testscript DEVICE LOCALIP REMOTEIP NETMASK' === SNIP === if [ $# -ne 4 ] then echo "Usage $0 DEVICE LOCALIP REMOTEIP NETMASK" exit 1 fi DEVICE="$1" LOCAL_ADDRESS="$2" REMOTE_ADDRESS="$3" NWMASK="$4" echo "Driver: $(ethtool -i ${DEVICE} | awk '/^driver:/{print $2}') " ethtool -k "${DEVICE}" | grep tx-udp echo echo "Set up NIC and tunnel..." ip addr add "${LOCAL_ADDRESS}/${NWMASK}" dev "${DEVICE}" ip link set "${DEVICE}" up sleep 2 ip link add vxlan1 type vxlan id 42 \ remote "${REMOTE_ADDRESS}" \ local "${LOCAL_ADDRESS}" \ dstport 0 \ dev "${DEVICE}" ip addr add fc00::1/64 dev vxlan1 ip link set vxlan1 up sleep 2 rm -f vxlan.pcap echo "Running tcpdump and iperf3..." ( nohup tcpdump -i any -w vxlan.pcap >/dev/null 2>&1 ) & sleep 2 iperf3 -c fc00::2 >/dev/null pkill tcpdump echo echo -n "Max. Paket Size: " tcpdump -r vxlan.pcap -nnle 2>/dev/null \ | grep "${LOCAL_ADDRESS}.*> ${REMOTE_ADDRESS}.*OTV" \ | awk '{print $8}' | awk -F ':' '{print $1}' \ | sort -n | tail -1 echo ip link del vxlan1 ip addr del ${LOCAL_ADDRESS}/${NWMASK} dev "${DEVICE}" === SNAP === The expected outcome is Max. Paket Size: 64904 This is what you see on igb, the code igc has been taken from. However, on igc the output is Max. Paket Size: 1516 so the GSO aggregate packets are segmented by the kernel before calling igc_xmit_frame. Inside the subsequent call to igc_tso, the check for skb_is_gso(skb) fails and the function returns prematurely. It turns out that this occurs because the feature flags aren't set entirely correctly in igc_probe. In contrast to the original code from igb_probe, igc_probe neglects to set the flags required to allow tunnel offloading. Setting the same flags as igb fixes the issue on igc. 
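The resulting flag wiring in igc_probe() is sketched below; it mirrors the hunk that follows (and what igb_probe already does), and nothing beyond these assignments is implied:

	/* Advertise the TSO/checksum features on the VLAN and tunnel
	 * (hw_enc) paths as well, so GSO aggregates are not segmented by
	 * the stack before they ever reach igc_xmit_frame().
	 */
	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;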
Fixes: 34428dff3679 ("igc: Add GSO partial support") Signed-off-by: Paolo Abeni Tested-by: Corinna Vinschen Acked-by: Sasha Neftin Tested-by: Nechama Kraus Signed-off-by: Tony Nguyen Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igc/igc_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index b877efae61df..0e19b4d02e62 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= netdev->features; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; -- cgit v1.2.3-59-g8ed1b From ee8a9600b5391f434905c46bec7f77d34505083e Mon Sep 17 00:00:00 2001 From: David Thompson Date: Wed, 15 Sep 2021 14:08:48 -0400 Subject: mlxbf_gige: clear valid_polarity upon open The network interface managed by the mlxbf_gige driver can get into a problem state where traffic does not flow. In this state, the interface will be up and enabled, but will stop processing received packets. This problem state will happen if three specific conditions occur: 1) driver has received more than (N * RxRingSize) packets but less than (N+1 * RxRingSize) packets, where N is an odd number Note: the command "ethtool -g " will display the current receive ring size, which currently defaults to 128 2) the driver's interface was disabled via "ifconfig oob_net0 down" during the window described in #1. 3) the driver's interface is re-enabled via "ifconfig oob_net0 up" This patch ensures that the driver's "valid_polarity" field is cleared during the open() method so that it always matches the receive polarity used by hardware. Without this fix, the driver needs to be unloaded and reloaded to correct this problem state. Fixes: f92e1869d74e ("Add Mellanox BlueField Gigabit Ethernet driver") Reviewed-by: Asmaa Mnebhi Signed-off-by: David Thompson Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 3e85b17f5857..6704f5c1aa32 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev) err = mlxbf_gige_clean_port(priv); if (err) goto free_irqs; + + /* Clear driver's valid_polarity to match hardware, + * since the above call to clean_port() resets the + * receive polarity used by hardware. + */ + priv->valid_polarity = 0; + err = mlxbf_gige_rx_init(priv); if (err) goto free_irqs; -- cgit v1.2.3-59-g8ed1b