Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 49
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 114
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 47
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 24
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 2
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c | 14
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 28
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 8
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 186
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 7
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 8
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h | 1
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 32
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 38
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 38
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c | 29
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 13
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac.h | 4
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 209
54 files changed, 762 insertions(+), 324 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index e0611cba87f9..15b31cddc054 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
return -ENOSPC;
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
- if (index > RXCHK_BRCM_TAG_MAX)
+ if (index >= RXCHK_BRCM_TAG_MAX)
return -ENOSPC;
/* Location is the classification ID, and index is the position
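A note on the one-line fix above: find_first_zero_bit() returns the bitmap size (one past the last valid index) when no zero bit exists, so only a ">=" comparison rejects that sentinel. A minimal userspace sketch of the caller pattern, with illustrative helper names rather than driver code:

#include <stddef.h>

/* Like the kernel's find_first_zero_bit(), return `size` (one past
 * the last valid index) when every bit is set.
 */
static size_t first_zero_bit(const unsigned long *map, size_t size)
{
        const size_t bits = 8 * sizeof(unsigned long);
        size_t i;

        for (i = 0; i < size; i++)
                if (!(map[i / bits] & (1UL << (i % bits))))
                        return i;
        return size;            /* bitmap full: sentinel equals size */
}

/* Caller pattern mirroring the fixed check: ">" would let the
 * sentinel (index == size) slip through and index out of range.
 */
static int claim_filter(unsigned long *filters, size_t max)
{
        const size_t bits = 8 * sizeof(unsigned long);
        size_t index = first_zero_bit(filters, max);

        if (index >= max)
                return -1;      /* -ENOSPC in the driver */
        filters[index / bits] |= 1UL << (index % bits);
        return (int)index;
}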
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fd6e0e48cd51..c5c8effc0139 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -10982,13 +10982,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
struct bnxt *bp = netdev_priv(dev);
if (netif_running(dev))
- bnxt_close_nic(bp, false, false);
+ bnxt_close_nic(bp, true, false);
dev->mtu = new_mtu;
bnxt_set_ring_params(bp);
if (netif_running(dev))
- return bnxt_open_nic(bp, false, false);
+ return bnxt_open_nic(bp, true, false);
return 0;
}
@@ -11252,7 +11252,7 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
- netdev_info(bp->dev, "Receive PF driver unload event!");
+ netdev_info(bp->dev, "Receive PF driver unload event!\n");
}
#else
@@ -11759,7 +11759,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
u32 dw;
if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN");
+ netdev_info(bp->dev, "Unable do read adapter's DSN\n");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index eec0168330b7..d3c93ccee86a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -641,14 +641,14 @@ static int bnxt_dl_params_register(struct bnxt *bp)
rc = devlink_params_register(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
if (rc) {
- netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
return rc;
}
rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed");
+ netdev_err(bp->dev, "devlink_port_params_register failed\n");
devlink_params_unregister(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
return rc;
@@ -679,7 +679,7 @@ int bnxt_dl_register(struct bnxt *bp)
else
dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
if (!dl) {
- netdev_warn(bp->dev, "devlink_alloc failed");
+ netdev_warn(bp->dev, "devlink_alloc failed\n");
return -ENOMEM;
}
@@ -692,7 +692,7 @@ int bnxt_dl_register(struct bnxt *bp)
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
- netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
+ netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
goto err_dl_free;
}
@@ -704,7 +704,7 @@ int bnxt_dl_register(struct bnxt *bp)
sizeof(bp->dsn));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
- netdev_err(bp->dev, "devlink_port_register failed");
+ netdev_err(bp->dev, "devlink_port_register failed\n");
goto err_dl_unreg;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6171fa8b3677..1f67e6729a2c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2007,8 +2007,8 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_nvm_install_update_input install = {0};
const struct firmware *fw;
- int rc, hwrm_err = 0;
u32 item_len;
+ int rc = 0;
u16 index;
bnxt_hwrm_fw_set_time(bp);
@@ -2028,7 +2028,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
}
if (fw->size > item_len) {
- netdev_err(dev, "PKG insufficient update area in nvram: %lu",
+ netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
(unsigned long)fw->size);
rc = -EFBIG;
} else {
@@ -2052,15 +2052,14 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
memcpy(kmem, fw->data, fw->size);
modify.host_src_addr = cpu_to_le64(dma_handle);
- hwrm_err = hwrm_send_message(bp, &modify,
- sizeof(modify),
- FLASH_PACKAGE_TIMEOUT);
+ rc = hwrm_send_message(bp, &modify, sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
dma_handle);
}
}
release_firmware(fw);
- if (rc || hwrm_err)
+ if (rc)
goto err_exit;
if ((install_type & 0xffff) == 0)
@@ -2069,20 +2068,19 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
install.install_type = cpu_to_le32(install_type);
mutex_lock(&bp->hwrm_cmd_lock);
- hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (hwrm_err) {
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc) {
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
if (resp->error_code && error_code ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags |= cpu_to_le16(
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
- hwrm_err = _hwrm_send_message(bp, &install,
- sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
}
- if (hwrm_err)
+ if (rc)
goto flash_pkg_exit;
}
@@ -2094,7 +2092,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
- if (hwrm_err == -EACCES)
+ if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
@@ -3338,7 +3336,7 @@ err:
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);
if (rc == -ENOBUFS)
- netdev_err(bp->dev, "Firmware returned large coredump buffer");
+ netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 0cc6ec51f45f..9bec256b0934 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -50,7 +50,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
/* check if dev belongs to the same switch */
if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
- netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
+ netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n",
dev->ifindex);
return BNXT_FID_INVALID;
}
@@ -70,7 +70,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
struct net_device *dev = act->dev;
if (!dev) {
- netdev_info(bp->dev, "no dev in mirred action");
+ netdev_info(bp->dev, "no dev in mirred action\n");
return -EINVAL;
}
@@ -106,7 +106,7 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
const struct ip_tunnel_key *tun_key = &tun_info->key;
if (ip_tunnel_info_af(tun_info) != AF_INET) {
- netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
+ netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n");
return -EOPNOTSUPP;
}
@@ -295,7 +295,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
int i, rc;
if (!flow_action_has_entries(flow_action)) {
- netdev_info(bp->dev, "no actions");
+ netdev_info(bp->dev, "no actions\n");
return -EINVAL;
}
@@ -370,7 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
(dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
- netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
+ netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -508,7 +508,7 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@@ -841,7 +841,7 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*decap_filter_handle = resp->decap_filter_id;
} else {
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -859,7 +859,7 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@@ -906,7 +906,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*encap_record_handle = resp->encap_record_id;
} else {
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -924,7 +924,7 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@@ -943,7 +943,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp,
tc_info->l2_ht_params);
if (rc)
netdev_err(bp->dev,
- "Error: %s: rhashtable_remove_fast: %d",
+ "Error: %s: rhashtable_remove_fast: %d\n",
__func__, rc);
kfree_rcu(l2_node, rcu);
}
@@ -972,7 +972,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
if (rc) {
kfree_rcu(l2_node, rcu);
netdev_err(bp->dev,
- "Error: %s: rhashtable_insert_fast: %d",
+ "Error: %s: rhashtable_insert_fast: %d\n",
__func__, rc);
return NULL;
}
@@ -1031,7 +1031,7 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
(flow->l4_key.ip_proto != IPPROTO_TCP &&
flow->l4_key.ip_proto != IPPROTO_UDP)) {
- netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
+ netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
flow->l4_key.ip_proto);
return false;
}
@@ -1088,7 +1088,7 @@ static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
*ht_params);
if (rc) {
- netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+ netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
rc = -1;
}
kfree_rcu(tunnel_node, rcu);
@@ -1129,7 +1129,7 @@ bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
tunnel_node->refcount++;
return tunnel_node;
err:
- netdev_info(bp->dev, "error rc=%d", rc);
+ netdev_info(bp->dev, "error rc=%d\n", rc);
return NULL;
}
@@ -1187,7 +1187,7 @@ static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
&decap_l2_node->node,
tc_info->decap_l2_ht_params);
if (rc)
- netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+ netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
kfree_rcu(decap_l2_node, rcu);
}
}
@@ -1227,7 +1227,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
if (IS_ERR(rt)) {
- netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
+ netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr);
return -EOPNOTSUPP;
}
@@ -1241,7 +1241,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
if (vlan->real_dev != real_dst_dev) {
netdev_info(bp->dev,
- "dst_dev(%s) doesn't use PF-if(%s)",
+ "dst_dev(%s) doesn't use PF-if(%s)\n",
netdev_name(dst_dev),
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
@@ -1253,7 +1253,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
#endif
} else if (dst_dev != real_dst_dev) {
netdev_info(bp->dev,
- "dst_dev(%s) for %pI4b is not PF-if(%s)",
+ "dst_dev(%s) for %pI4b is not PF-if(%s)\n",
netdev_name(dst_dev), &flow.daddr,
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
@@ -1262,7 +1262,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
if (!nbr) {
- netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
+ netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n",
&flow.daddr);
rc = -EOPNOTSUPP;
goto put_rt;
@@ -1472,7 +1472,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
tc_info->flow_ht_params);
if (rc)
- netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
+ netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n",
__func__, rc);
kfree_rcu(flow_node, rcu);
@@ -1587,7 +1587,7 @@ unlock:
free_node:
kfree_rcu(new_node, rcu);
done:
- netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
+ netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
__func__, tc_flow_cmd->cookie, rc);
return rc;
}
@@ -1700,7 +1700,7 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
le64_to_cpu(resp_bytes[i]);
}
} else {
- netdev_info(bp->dev, "error rc=%d", rc);
+ netdev_info(bp->dev, "error rc=%d\n", rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -1970,7 +1970,7 @@ static int bnxt_tc_indr_block_event(struct notifier_block *nb,
bp);
if (rc)
netdev_info(bp->dev,
- "Failed to register indirect blk: dev: %s",
+ "Failed to register indirect blk: dev: %s\n",
netdev->name);
break;
case NETDEV_UNREGISTER:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index b010b34cdaf8..6f2faf81c1ae 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -43,7 +43,7 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
*tx_cfa_action, *rx_cfa_code);
} else {
- netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -60,7 +60,7 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
}
@@ -465,7 +465,7 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
return 0;
err:
- netdev_info(bp->dev, "%s error=%d", __func__, rc);
+ netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
kfree(cfa_code_map);
__bnxt_vf_reps_destroy(bp);
return rc;
@@ -488,7 +488,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
- netdev_info(bp->dev, "already in %s eswitch mode",
+ netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
rc = -EINVAL;
@@ -508,7 +508,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
+ netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
rc = -EPERM;
goto done;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6392a2530183..10244941a7a6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -294,6 +294,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~ID_MODE_DIS;
reg |= id_mode_dis;
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
reg |= RGMII_MODE_EN_V123;
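The added line matters because the EXT_RGMII_OOB_CTRL update is a read-modify-write: without clearing the bit first, a value left over from a previous configuration survives even when the new id_mode_dis value is zero. A small sketch of the pattern, assuming an illustrative bit position:

#include <stdint.h>

#define ID_MODE_DIS     (1u << 16)      /* assumed bit position, for illustration */

/* Clear the field before OR-ing in the new value; without the
 * clear, a bit set by an earlier configuration sticks even when
 * the caller passes 0.
 */
static uint32_t update_oob_ctrl(uint32_t reg, uint32_t id_mode_dis)
{
        reg &= ~ID_MODE_DIS;
        reg |= id_mode_dis;     /* either 0 or ID_MODE_DIS */
        return reg;
}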
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 649842a8aa28..97f90edbc068 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5381,12 +5381,11 @@ static inline bool is_x_10g_port(const struct link_config *lc)
static int cfg_queues(struct adapter *adap)
{
u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+ u32 i, n10g = 0, qidx = 0, n1g = 0;
+ u32 ncpus = num_online_cpus();
u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
- u32 i, n10g = 0, qidx = 0;
-#ifndef CONFIG_CHELSIO_T4_DCB
- int q10g = 0;
-#endif
+ u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */
if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
@@ -5424,44 +5423,50 @@ static int cfg_queues(struct adapter *adap)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
+
+ /* We default to 1 queue per non-10G port and up to # of cores queues
+ * per 10G port.
+ */
+ if (n10g)
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
+
+ n1g = adap->params.nports - n10g;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
* own TX Queue in order to prevent Head-Of-Line Blocking.
*/
+ q1g = 8;
if (adap->params.nports * 8 > avail_eth_qsets) {
dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
avail_eth_qsets, adap->params.nports * 8);
return -ENOMEM;
}
- for_each_port(adap, i) {
- struct port_info *pi = adap2pinfo(adap, i);
+ if (adap->params.nports * ncpus < avail_eth_qsets)
+ q10g = max(8U, ncpus);
+ else
+ q10g = max(8U, q10g);
- pi->first_qset = qidx;
- pi->nqsets = is_kdump_kernel() ? 1 : 8;
- qidx += pi->nqsets;
- }
-#else /* !CONFIG_CHELSIO_T4_DCB */
- /* We default to 1 queue per non-10G port and up to # of cores queues
- * per 10G port.
- */
- if (n10g)
- q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
- if (q10g > netif_get_num_default_rss_queues())
- q10g = netif_get_num_default_rss_queues();
+ while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+ q10g--;
- if (is_kdump_kernel())
+#else /* !CONFIG_CHELSIO_T4_DCB */
+ q1g = 1;
+ q10g = min(q10g, ncpus);
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+ if (is_kdump_kernel()) {
q10g = 1;
+ q1g = 1;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
qidx += pi->nqsets;
}
-#endif /* !CONFIG_CHELSIO_T4_DCB */
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
@@ -5473,7 +5478,7 @@ static int cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
num_ulds = adap->num_uld + adap->num_ofld_uld;
- i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+ i = min_t(u32, MAX_OFLD_QSETS, ncpus);
avail_uld_qsets = roundup(i, adap->params.nports);
if (avail_qsets < num_ulds * adap->params.nports) {
adap->params.offload = 0;
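For readers following the queue-set arithmetic above, a rough standalone model of the non-DCB split (names mirror cfg_queues(), but this is an illustration, not the driver function): each non-10G port gets q1g queue sets, the 10G ports share the remainder capped at the online CPU count, and q10g is walked down until the total fits.

static unsigned int split_q10g(unsigned int avail_eth_qsets,
                               unsigned int nports, unsigned int n10g,
                               unsigned int ncpus, unsigned int q1g)
{
        unsigned int n1g = nports - n10g;
        unsigned int q10g;

        if (!n10g || avail_eth_qsets < n1g * q1g)
                return 0;
        q10g = (avail_eth_qsets - n1g * q1g) / n10g;
        if (q10g > ncpus)
                q10g = ncpus;   /* no point exceeding online CPUs */
        while (q10g && q10g * n10g > avail_eth_qsets - n1g * q1g)
                q10g--;         /* shrink until everything fits */
        return q10g;
}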
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index fd93d542f497..ca74a684a904 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,4 +1,5 @@
/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -123,7 +124,22 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define FSL_QMAN_MAX_OAL 127
/* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * is crossing a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size multiple
+ * of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+ DPAA_A050385_ALIGN : 16)
+#else
#define DPAA_FD_DATA_ALIGNMENT 16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif
/* The DPAA requires 256 bytes reserved and mapped for the SGT */
#define DPAA_SGT_SIZE 256
@@ -158,8 +174,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
+ + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#else
#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
dpaa_rx_extra_headroom)
+#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@ -180,7 +201,12 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
#define DPAA_BP_RAW_SIZE 4096
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+ ~(DPAA_A050385_ALIGN - 1))
+#else
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif
static int dpaa_max_frm;
@@ -1192,7 +1218,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
buf_prefix_content.pass_time_stamp = true;
- buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+ buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
rx_p = &params.specific_params.rx_params;
rx_p->err_fqid = errq->fqid;
@@ -1662,6 +1688,8 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
return CHECKSUM_NONE;
}
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
+
/* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
* accommodate the shared info area of the skb.
@@ -1733,8 +1761,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
- WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
- SMP_CACHE_BYTES));
+ WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
dma_unmap_page(priv->rx_dma_dev, sg_addr,
DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
@@ -2022,6 +2049,75 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
return 0;
}
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct sk_buff *new_skb, *skb = *s;
+ unsigned char *start, i;
+
+ /* check linear buffer alignment */
+ if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* linear buffers just need to have an aligned start */
+ if (!skb_is_nonlinear(skb))
+ return 0;
+
+ /* linear data size for nonlinear skbs needs to be aligned */
+ if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /* all fragments need to have aligned start addresses */
+ if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* all but last fragment need to have aligned sizes */
+ if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+ (i < skb_shinfo(skb)->nr_frags - 1))
+ goto workaround;
+ }
+
+ return 0;
+
+workaround:
+ /* copy all the skb content into a new linear buffer */
+ new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+ priv->tx_headroom);
+ if (!new_skb)
+ return -ENOMEM;
+
+ /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+ skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+ /* Workaround for DPAA_A050385 requires data start to be aligned */
+ start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+ if (start - new_skb->data != 0)
+ skb_reserve(new_skb, start - new_skb->data);
+
+ skb_put(new_skb, skb->len);
+ skb_copy_bits(skb, 0, new_skb->data, skb->len);
+ skb_copy_header(new_skb, skb);
+ new_skb->dev = skb->dev;
+
+ /* We move the headroom when we align it so we have to reset the
+ * network and transport header offsets relative to the new data
+ * pointer. The checksum offload relies on these offsets.
+ */
+ skb_set_network_header(new_skb, skb_network_offset(skb));
+ skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+ /* TODO: does timestamping need the result in the old skb? */
+ dev_kfree_skb(skb);
+ *s = new_skb;
+
+ return 0;
+}
+#endif
+
static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
@@ -2068,6 +2164,14 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
nonlinear = skb_is_nonlinear(skb);
}
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ if (dpaa_a050385_wa(net_dev, &skb))
+ goto enomem;
+ nonlinear = skb_is_nonlinear(skb);
+ }
+#endif
+
if (nonlinear) {
/* Just create a S/G fd based on the skb */
err = skb_to_sg_fd(priv, skb, &fd);
@@ -2741,9 +2845,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
- return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
- DPAA_FD_DATA_ALIGNMENT) :
- headroom;
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
static int dpaa_eth_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4432a59904c7..23c5fef2f1ad 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2529,15 +2529,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EINVAL;
}
- cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
if (cycle > 0xFFFF) {
dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
- cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
if (cycle > 0xFFFF) {
- dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
+ dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
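The two replaced lines fix a validate-the-wrong-value bug: the range check ran on the values already stored in the driver (fep->rx_time_itr, fep->tx_time_itr) instead of the ones the user is requesting, so a bad request passed whenever the current setting happened to be valid. In miniature, with illustrative types rather than the FEC API:

struct coalesce_req {
        unsigned int rx_usecs;
        unsigned int tx_usecs;
};

static int check_coalesce(const struct coalesce_req *req,
                          unsigned long (*us_to_cycles)(unsigned int us))
{
        /* check the *requested* values, not the stored ones */
        if (us_to_cycles(req->rx_usecs) > 0xFFFF)
                return -1;      /* -EINVAL: Rx exceeds the 16-bit ITR field */
        if (us_to_cycles(req->tx_usecs) > 0xFFFF)
                return -1;      /* -EINVAL: Tx exceeds the 16-bit ITR field */
        return 0;
}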
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 0139cb9042ec..34150182cc35 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -8,3 +8,31 @@ config FSL_FMAN
help
Freescale Data-Path Acceleration Architecture Frame Manager
(FMan) support
+
+config DPAA_ERRATUM_A050385
+ bool
+ depends on ARM64 && FSL_DPAA
+ default y
+ help
+ DPAA FMan erratum A050385 software workaround implementation:
+ align buffers, data start, SG fragment length to avoid FMan DMA
+ splits.
+ FMAN DMA read or writes under heavy traffic load may cause FMAN
+ internal resource leak thus stopping further packet processing.
+ The FMAN internal queue can overflow when FMAN splits single
+ read or write transactions into multiple smaller transactions
+ such that more than 17 AXI transactions are in flight from FMAN
+ to interconnect. When the FMAN internal queue overflows, it can
+ stall further packet processing. The issue can occur with any
+ one of the following three conditions:
+ 1. FMAN AXI transaction crosses 4K address boundary (Errata
+ A010022)
+ 2. FMAN DMA address for an AXI transaction is not 16 byte
+ aligned, i.e. the last 4 bits of an address are non-zero
+ 3. Scatter Gather (SG) frames have more than one SG buffer in
+ the SG list and any one of the buffers, except the last
+ buffer in the SG list has data size that is not a multiple
+ of 16 bytes, i.e., other than 16, 32, 48, 64, etc.
+ With any one of the above three conditions present, there is
+ likelihood of stalled FMAN packet processing, especially under
+ stress with multiple ports injecting line-rate traffic.
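A sketch of the three trigger conditions above as standalone predicates (illustrative helpers, not the driver's checks); a DMA transaction is suspect if any one of them holds:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool crosses_4k_boundary(uintptr_t addr, size_t len)
{
        /* condition 1: first and last byte fall in different 4K pages */
        return len && (addr >> 12) != ((addr + len - 1) >> 12);
}

static bool not_16b_aligned(uintptr_t addr)
{
        /* condition 2: last 4 bits of the DMA address are non-zero */
        return addr & 0xF;
}

static bool bad_sg_fragment(size_t frag_len, bool is_last)
{
        /* condition 3: any SG buffer but the last whose size is not
         * a multiple of 16 bytes
         */
        return !is_last && (frag_len % 16);
}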
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 934111def0be..f151d6e111dd 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,5 +1,6 @@
/*
* Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -566,6 +567,10 @@ struct fman_cfg {
u32 qmi_def_tnums_thresh;
};
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static bool fman_has_err_a050385;
+#endif
+
static irqreturn_t fman_exceptions(struct fman *fman,
enum fman_exceptions exception)
{
@@ -2518,6 +2523,14 @@ struct fman *fman_bind(struct device *fm_dev)
}
EXPORT_SYMBOL(fman_bind);
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void)
+{
+ return fman_has_err_a050385;
+}
+EXPORT_SYMBOL(fman_has_errata_a050385);
+#endif
+
static irqreturn_t fman_err_irq(int irq, void *handle)
{
struct fman *fman = (struct fman *)handle;
@@ -2845,6 +2858,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_free;
}
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ fman_has_err_a050385 =
+ of_property_read_bool(fm_node, "fsl,erratum-a050385");
+#endif
+
return fman;
fman_node_put:
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 935c317fa696..f2ede1360f03 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,5 +1,6 @@
/*
* Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -398,6 +399,10 @@ u16 fman_get_max_frm(void);
int fman_get_rx_extra_headroom(void);
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void);
+#endif
+
struct fman *fman_bind(struct device *dev);
#endif /* __FM_H */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 1b0313900f98..d87158acdf6f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
+ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index acb796cc10d0..a7f40aa1a0ea 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1711,7 +1711,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
- kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
+ kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 492bc9446463..d3b0cd74ecd2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2446,10 +2446,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
+ struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
+ if (!mac->support_autoneg && mac->speed == speed &&
+ mac->duplex == duplex)
return 0;
ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
@@ -7743,16 +7745,27 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
struct hclge_desc desc;
int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
-
+ /* read current vlan filter parameter */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
- req->vlan_fe = filter_en ? fe_type : 0;
req->vf_id = vf_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vlan filter config, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->vlan_fe = filter_en ?
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
+ dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
ret);
return ret;
@@ -8270,6 +8283,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
kfree(vlan);
}
}
+ clear_bit(vport->vport_id, hdev->vf_vlan_full);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -8486,6 +8500,28 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
}
}
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
+{
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* clear port base vlan for all vf */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ vport = &hdev->vport[vf];
+ vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vf vlan for vf%d, ret = %d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill)
{
@@ -9895,6 +9931,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_mac *mac = &hdev->hw.mac;
hclge_reset_vf_rate(hdev);
+ hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
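Among the hunks above, the HCLGE_OPC_VLAN_FILTER_CTRL change converts a blind overwrite into a read-modify-write, so enabling or disabling one filter type preserves the bits for the others. Reduced to its bit arithmetic (illustrative signature):

#include <stdbool.h>
#include <stdint.h>

static uint8_t update_vlan_fe(uint8_t cur, uint8_t fe_type, bool filter_en)
{
        /* touch only this filter type's bits; keep the rest */
        return filter_en ? (uint8_t)(cur | fe_type)
                         : (uint8_t)(cur & ~fe_type);
}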
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a3c0822191a9..3d850f6b1e37 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -799,6 +799,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_get_link_mode(vport, req);
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
+ case HCLGE_MBX_VF_UNINIT:
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index d6597206e692..0510d85a7f6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2803,6 +2803,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0,
+ false, NULL, 0);
+
if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
hclgevf_uninit_msi(hdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 6f2cf569a283..79b3d53f2fbf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -297,6 +297,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
}
hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
hw_ioctxt.cmdq_depth = 0;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index b069045de416..66fd2340d447 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -151,8 +151,8 @@ struct hinic_cmd_hw_ioctxt {
u8 lro_en;
u8 rsvd3;
+ u8 ppf_idx;
u8 rsvd4;
- u8 rsvd5;
u16 rq_depth;
u16 rx_buf_sz_idx;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index 517794509eb2..c7bb9ceca72c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -137,6 +137,7 @@
#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
+#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index f4a339b10b10..79091e131418 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -94,6 +94,7 @@ struct hinic_rq {
struct hinic_wq *wq;
+ struct cpumask affinity_mask;
u32 irq;
u16 msix_entry;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 02a14f5e7fe3..13560975c103 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -356,7 +356,8 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
if (!num_cpus)
num_cpus = num_online_cpus();
- nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus);
+ nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
+ nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
nic_dev->rss_limit = nic_dev->num_qps;
nic_dev->num_rss = nic_dev->num_qps;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 56ea6d692f1c..2695ad69fca6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -475,7 +475,6 @@ static int rx_request_irq(struct hinic_rxq *rxq)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_rq *rq = rxq->rq;
struct hinic_qp *qp;
- struct cpumask mask;
int err;
rx_add_napi(rxq);
@@ -492,8 +491,8 @@ static int rx_request_irq(struct hinic_rxq *rxq)
}
qp = container_of(rq, struct hinic_qp, rq);
- cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
- return irq_set_affinity_hint(rq->irq, &mask);
+ cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
+ return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
}
static void rx_free_irq(struct hinic_rxq *rxq)
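The cpumask change above is a lifetime fix: irq_set_affinity_hint() stores the pointer it is given and the IRQ core dereferences it later (for example via /proc/irq/<n>/affinity_hint), so a stack-local mask becomes a dangling pointer once rx_request_irq() returns. A miniature of the fixed lifetime, with a stand-in for the IRQ core (illustrative names):

struct mask { unsigned long bits; };

/* models the IRQ core keeping the pointer around */
static const struct mask *stored_hint;

static void set_affinity_hint(const struct mask *m)
{
        stored_hint = m;        /* read again long after the caller returns */
}

struct rq { struct mask affinity_mask; };

static void request_irq_fixed(struct rq *rq, unsigned int cpu)
{
        rq->affinity_mask.bits = 1UL << cpu;    /* lives as long as the rq */
        set_affinity_hint(&rq->affinity_mask);  /* was: address of a stack local */
}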
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c75239d8820f..4bd33245bad6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2142,6 +2142,8 @@ static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
+ bool saved_state = false;
+ unsigned long flags;
u32 reset_state;
int rc = 0;
@@ -2153,17 +2155,25 @@ static void __ibmvnic_reset(struct work_struct *work)
return;
}
- reset_state = adapter->state;
-
rwi = get_next_rwi(adapter);
while (rwi) {
+ spin_lock_irqsave(&adapter->state_lock, flags);
+
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED) {
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
kfree(rwi);
rc = EBUSY;
break;
}
+ if (!saved_state) {
+ reset_state = adapter->state;
+ adapter->state = VNIC_RESETTING;
+ saved_state = true;
+ }
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
+
if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset(adapter, rwi, reset_state);
@@ -5091,6 +5101,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
__ibmvnic_delayed_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ spin_lock_init(&adapter->state_lock);
mutex_init(&adapter->fw_lock);
init_completion(&adapter->init_done);
init_completion(&adapter->fw_done);
@@ -5163,8 +5174,17 @@ static int ibmvnic_remove(struct vio_dev *dev)
{
struct net_device *netdev = dev_get_drvdata(&dev->dev);
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->state_lock, flags);
+ if (adapter->state == VNIC_RESETTING) {
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
+ return -EBUSY;
+ }
adapter->state = VNIC_REMOVING;
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
+
rtnl_lock();
unregister_netdevice(netdev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 60eccaf91b12..f8416e1d4cf0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -941,7 +941,8 @@ enum vnic_state {VNIC_PROBING = 1,
VNIC_CLOSING,
VNIC_CLOSED,
VNIC_REMOVING,
- VNIC_REMOVED};
+ VNIC_REMOVED,
+ VNIC_RESETTING};
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
@@ -1090,4 +1091,7 @@ struct ibmvnic_adapter {
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
+
+ /* Used for serializatin of state field */
+ spinlock_t state_lock;
};
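The state_lock added across these ibmvnic hunks enables a check-and-set idiom: remove() refuses to proceed while a reset is in flight, and the reset worker marks RESETTING before touching the device, so the two paths can never interleave. A sketch with a pthread mutex standing in for the kernel spinlock (illustrative, not the driver code):

#include <pthread.h>

enum vnic_state { VNIC_OPEN, VNIC_RESETTING, VNIC_REMOVING };

struct adapter {
        pthread_mutex_t state_lock;
        enum vnic_state state;
};

static int vnic_remove(struct adapter *a)
{
        pthread_mutex_lock(&a->state_lock);
        if (a->state == VNIC_RESETTING) {
                pthread_mutex_unlock(&a->state_lock);
                return -16;     /* -EBUSY: let the reset finish first */
        }
        a->state = VNIC_REMOVING;       /* atomic w.r.t. the reset worker */
        pthread_mutex_unlock(&a->state_lock);
        return 0;
}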
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 0b9e851f3da4..d2e2dc538428 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
}
- dev->err_interrupt = platform_get_irq(pdev, 0);
+ dev->err_interrupt = platform_get_irq_optional(pdev, 0);
if (dev->err_interrupt > 0 &&
resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
dev_err(&pdev->dev,
@@ -364,8 +364,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
writel(MVMDIO_ERR_INT_SMI_DONE,
dev->regs + MVMDIO_ERR_INT_MASK);
- } else if (dev->err_interrupt == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ } else if (dev->err_interrupt < 0) {
+ ret = dev->err_interrupt;
goto out_mdio;
}
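The mvmdio hunks switch to platform_get_irq_optional() and propagate any negative return value, not just -EPROBE_DEFER, so real lookup errors are no longer silently swallowed. The resulting error-handling shape, as a hypothetical helper:

static int setup_err_irq(int irq)
{
        if (irq > 0)
                return 0;       /* have an IRQ: unmask and use it */
        if (irq < 0)
                return irq;     /* propagate every error code as-is */
        return 0;               /* 0: no IRQ wired up, carry on without */
}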
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 966983674663..21de4764d4c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5147,7 +5147,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
- struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5168,7 +5167,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(mdev, netdev);
+ mlx5_lag_remove(mdev);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7b48ccacebe2..6ed307d7f191 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1861,7 +1861,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
- struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1870,7 +1869,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
- mlx5_lag_remove(mdev, netdev);
+ mlx5_lag_remove(mdev);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index b91eabc09fbc..8e19f6ab8393 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -464,9 +464,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
struct mlx5_lag *ldev;
int changed = 0;
- if (!net_eq(dev_net(ndev), &init_net))
- return NOTIFY_DONE;
-
if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
return NOTIFY_DONE;
@@ -586,8 +583,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_dev_net(netdev, &ldev->nb,
- &ldev->nn)) {
+ if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -600,7 +596,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
+void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
int i;
@@ -620,8 +616,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
- unregister_netdevice_notifier_dev_net(netdev, &ldev->nb,
- &ldev->nn);
+ unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 316ab09e2664..f1068aac6406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -44,7 +44,6 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
- struct netdev_net_notifier nn;
struct lag_mp lag_mp;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fcce9e0fc82c..da67b28d6e23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -157,7 +157,7 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index e0d7d2d9a0c8..43fa8c85b5d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -28,7 +28,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
-#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 1c9e70c8cc30..58579baf3f7a 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -513,14 +513,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
{
struct net_device *netdev = pw;
struct ks_net *ks = netdev_priv(netdev);
+ unsigned long flags;
u16 status;
+ spin_lock_irqsave(&ks->statelock, flags);
/*this should be the first in IRQ handler */
ks_save_cmd_reg(ks);
status = ks_rdreg16(ks, KS_ISR);
if (unlikely(!status)) {
ks_restore_cmd_reg(ks);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_NONE;
}
@@ -546,6 +549,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
ks->netdev->stats.rx_over_errors++;
/* this should be the last in IRQ handler*/
ks_restore_cmd_reg(ks);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_HANDLED;
}
@@ -615,6 +619,7 @@ static int ks_net_stop(struct net_device *netdev)
/* shutdown RX/TX QMU */
ks_disable_qmu(ks);
+ ks_disable_int(ks);
/* set powermode to soft power down to save power */
ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@@ -671,10 +676,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
netdev_tx_t retv = NETDEV_TX_OK;
struct ks_net *ks = netdev_priv(netdev);
+ unsigned long flags;
- disable_irq(netdev->irq);
- ks_disable_int(ks);
- spin_lock(&ks->statelock);
+ spin_lock_irqsave(&ks->statelock, flags);
/* Extra space are required:
* 4 byte for alignment, 4 for status/length, 4 for CRC
@@ -688,9 +692,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb(skb);
} else
retv = NETDEV_TX_BUSY;
- spin_unlock(&ks->statelock);
- ks_enable_int(ks);
- enable_irq(netdev->irq);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return retv;
}
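The ks8851_mll hunks replace the disable_irq()/plain spin_lock() dance in the transmit path with a single spinlock taken with irqsave in both the hard-IRQ handler and ndo_start_xmit(), which serializes the two paths without masking the interrupt line around every packet. In miniature, with a pthread mutex standing in for spin_lock_irqsave() (illustrative):

#include <pthread.h>

static pthread_mutex_t statelock = PTHREAD_MUTEX_INITIALIZER;

static void irq_handler(void)
{
        pthread_mutex_lock(&statelock);         /* spin_lock_irqsave() */
        /* save cmd reg, read ISR, handle events, restore cmd reg */
        pthread_mutex_unlock(&statelock);       /* spin_unlock_irqrestore() */
}

static void start_xmit(void)
{
        pthread_mutex_lock(&statelock);         /* no disable_irq() needed */
        /* check FIFO space, queue the packet */
        pthread_mutex_unlock(&statelock);
}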
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 86d543ab1ab9..d3b7373c5961 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -2176,24 +2176,29 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
return 0;
}
-static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
+ * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ */
+static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
int atop_wm;
- ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+ ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
/* Set Pause WM hysteresis
- * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ
- * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ
+ * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ
+ * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ
*/
ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
SYS_PAUSE_CFG_PAUSE_STOP(101) |
SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
/* Tail dropping watermark */
- atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
- ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+ atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) /
+ OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen),
SYS_ATOP, port);
ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
}
@@ -2222,9 +2227,10 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
DEV_MAC_HDX_CFG);
/* Set Max Length and maximum tags allowed */
- ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+ ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
DEV_MAC_TAGS_CFG);
@@ -2310,18 +2316,18 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
* Only one port can be an NPI at the same time.
*/
if (cpu < ocelot->num_phys_ports) {
- int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
+ int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
QSYS_EXT_CPU_CFG);
if (injection == OCELOT_TAG_PREFIX_SHORT)
- mtu += OCELOT_SHORT_PREFIX_LEN;
+ sdu += OCELOT_SHORT_PREFIX_LEN;
else if (injection == OCELOT_TAG_PREFIX_LONG)
- mtu += OCELOT_LONG_PREFIX_LEN;
+ sdu += OCELOT_LONG_PREFIX_LEN;
- ocelot_port_set_mtu(ocelot, cpu, mtu);
+ ocelot_port_set_maxlen(ocelot, cpu, sdu);
}
/* CPU port Injection/Extraction configuration */
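The ocelot rename is a units fix: DEV_MAC_MAXLEN_CFG takes a maximum frame length, while callers now pass an L2 payload size (SDU), so the helper adds the Ethernet overhead itself. The arithmetic, as a runnable sketch:

#include <stdio.h>

#define ETH_HLEN    14  /* dst MAC + src MAC + ethertype */
#define ETH_FCS_LEN  4  /* frame check sequence */

static int sdu_to_maxlen(int sdu)
{
        return sdu + ETH_HLEN + ETH_FCS_LEN;
}

int main(void)
{
        /* ETH_DATA_LEN (1500) -> 1518, the classic max frame size */
        printf("%d\n", sdu_to_maxlen(1500));
        return 0;
}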
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 191271f6260d..c2f5b691e0fa 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1688,7 +1688,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
return -EINVAL;
- down_read(&ionic->vf_op_lock);
+ down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
@@ -1698,7 +1698,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
ether_addr_copy(ionic->vfs[vf].macaddr, mac);
}
- up_read(&ionic->vf_op_lock);
+ up_write(&ionic->vf_op_lock);
return ret;
}
@@ -1719,7 +1719,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- down_read(&ionic->vf_op_lock);
+ down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
@@ -1730,7 +1730,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
ionic->vfs[vf].vlanid = vlan;
}
- up_read(&ionic->vf_op_lock);
+ up_write(&ionic->vf_op_lock);
return ret;
}
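The ionic hunks correct the rw-semaphore direction: paths that modify VF state must take vf_op_lock for writing, since a read lock only excludes concurrent teardown and would let two writers race each other. A sketch with a pthread rwlock as a stand-in (illustrative names):

#include <pthread.h>
#include <string.h>

static pthread_rwlock_t vf_op_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned char vf_mac[6];

static void set_vf_mac(const unsigned char *mac)
{
        pthread_rwlock_wrlock(&vf_op_lock);     /* was rdlock: racy */
        memcpy(vf_mac, mac, sizeof(vf_mac));    /* mutation needs the write side */
        pthread_rwlock_unlock(&vf_op_lock);
}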
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 06de59521fc4..fbf4cbcf1a65 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -13,25 +13,6 @@
#include "rmnet_vnd.h"
#include "rmnet_private.h"
-/* Locking scheme -
- * The shared resource which needs to be protected is realdev->rx_handler_data.
- * For the writer path, this is using rtnl_lock(). The writer paths are
- * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
- * paths are already called with rtnl_lock() acquired in. There is also an
- * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
- * dereference here, we will need to use rtnl_dereference(). Dev list writing
- * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
- * For the reader path, the real_dev->rx_handler_data is called in the TX / RX
- * path. We only need rcu_read_lock() for these scenarios. In these cases,
- * the rcu_read_lock() is held in __dev_queue_xmit() and
- * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
- * to get the relevant information. For dev list reading, we again acquire
- * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
- * We also use unregister_netdevice_many() to free all rmnet devices in
- * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in
- * same context.
- */
-
/* Local Definitions and Declarations */
static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
@@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
return rtnl_dereference(real_dev->rx_handler_data);
}
-static int rmnet_unregister_real_device(struct net_device *real_dev,
- struct rmnet_port *port)
+static int rmnet_unregister_real_device(struct net_device *real_dev)
{
+ struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
+
if (port->nr_rmnet_devs)
return -EINVAL;
@@ -61,9 +43,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
kfree(port);
- /* release reference on real_dev */
- dev_put(real_dev);
-
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
@@ -89,9 +68,6 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return -EBUSY;
}
- /* hold on to real dev for MAP data */
- dev_hold(real_dev);
-
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
@@ -99,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return 0;
}
-static void rmnet_unregister_bridge(struct net_device *dev,
- struct rmnet_port *port)
+static void rmnet_unregister_bridge(struct rmnet_port *port)
{
- struct rmnet_port *bridge_port;
- struct net_device *bridge_dev;
+ struct net_device *bridge_dev, *real_dev, *rmnet_dev;
+ struct rmnet_port *real_port;
if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
return;
- /* bridge slave handling */
+ rmnet_dev = port->rmnet_dev;
if (!port->nr_rmnet_devs) {
- bridge_dev = port->bridge_ep;
+ /* bridge device */
+ real_dev = port->bridge_ep;
+ bridge_dev = port->dev;
- bridge_port = rmnet_get_port_rtnl(bridge_dev);
- bridge_port->bridge_ep = NULL;
- bridge_port->rmnet_mode = RMNET_EPMODE_VND;
+ real_port = rmnet_get_port_rtnl(real_dev);
+ real_port->bridge_ep = NULL;
+ real_port->rmnet_mode = RMNET_EPMODE_VND;
} else {
+ /* real device */
bridge_dev = port->bridge_ep;
- bridge_port = rmnet_get_port_rtnl(bridge_dev);
- rmnet_unregister_real_device(bridge_dev, bridge_port);
+ port->bridge_ep = NULL;
+ port->rmnet_mode = RMNET_EPMODE_VND;
}
+
+ netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
+ rmnet_unregister_real_device(bridge_dev);
}
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
@@ -135,6 +116,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
int err = 0;
u16 mux_id;
+ if (!tb[IFLA_LINK]) {
+ NL_SET_ERR_MSG_MOD(extack, "link not specified");
+ return -EINVAL;
+ }
+
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !dev)
return -ENODEV;
@@ -157,7 +143,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err1;
+ err = netdev_upper_dev_link(real_dev, dev, extack);
+ if (err < 0)
+ goto err2;
+
port->rmnet_mode = mode;
+ port->rmnet_dev = dev;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@@ -173,8 +164,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return 0;
+err2:
+ unregister_netdevice(dev);
+ rmnet_vnd_dellink(mux_id, port, ep);
err1:
- rmnet_unregister_real_device(real_dev, port);
+ rmnet_unregister_real_device(real_dev);
err0:
kfree(ep);
return err;
@@ -183,77 +177,74 @@ err0:
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
struct rmnet_priv *priv = netdev_priv(dev);
- struct net_device *real_dev;
+ struct net_device *real_dev, *bridge_dev;
+ struct rmnet_port *real_port, *bridge_port;
struct rmnet_endpoint *ep;
- struct rmnet_port *port;
- u8 mux_id;
+ u8 mux_id = priv->mux_id;
real_dev = priv->real_dev;
- if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
+ if (!rmnet_is_real_dev_registered(real_dev))
return;
- port = rmnet_get_port_rtnl(real_dev);
-
- mux_id = rmnet_vnd_get_mux(dev);
+ real_port = rmnet_get_port_rtnl(real_dev);
+ bridge_dev = real_port->bridge_ep;
+ if (bridge_dev) {
+ bridge_port = rmnet_get_port_rtnl(bridge_dev);
+ rmnet_unregister_bridge(bridge_port);
+ }
- ep = rmnet_get_endpoint(port, mux_id);
+ ep = rmnet_get_endpoint(real_port, mux_id);
if (ep) {
hlist_del_init_rcu(&ep->hlnode);
- rmnet_unregister_bridge(dev, port);
- rmnet_vnd_dellink(mux_id, port, ep);
+ rmnet_vnd_dellink(mux_id, real_port, ep);
kfree(ep);
}
- rmnet_unregister_real_device(real_dev, port);
+ netdev_upper_dev_unlink(real_dev, dev);
+ rmnet_unregister_real_device(real_dev);
unregister_netdevice_queue(dev, head);
}
-static void rmnet_force_unassociate_device(struct net_device *dev)
+static void rmnet_force_unassociate_device(struct net_device *real_dev)
{
- struct net_device *real_dev = dev;
struct hlist_node *tmp_ep;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
unsigned long bkt_ep;
LIST_HEAD(list);
- if (!rmnet_is_real_dev_registered(real_dev))
- return;
-
- ASSERT_RTNL();
-
- port = rmnet_get_port_rtnl(dev);
-
- rcu_read_lock();
- rmnet_unregister_bridge(dev, port);
-
- hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
- unregister_netdevice_queue(ep->egress_dev, &list);
- rmnet_vnd_dellink(ep->mux_id, port, ep);
+ port = rmnet_get_port_rtnl(real_dev);
- hlist_del_init_rcu(&ep->hlnode);
- kfree(ep);
+ if (port->nr_rmnet_devs) {
+ /* real device */
+ rmnet_unregister_bridge(port);
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ unregister_netdevice_queue(ep->egress_dev, &list);
+ netdev_upper_dev_unlink(real_dev, ep->egress_dev);
+ rmnet_vnd_dellink(ep->mux_id, port, ep);
+ hlist_del_init_rcu(&ep->hlnode);
+ kfree(ep);
+ }
+ rmnet_unregister_real_device(real_dev);
+ unregister_netdevice_many(&list);
+ } else {
+ rmnet_unregister_bridge(port);
}
-
- rcu_read_unlock();
- unregister_netdevice_many(&list);
-
- rmnet_unregister_real_device(real_dev, port);
}
static int rmnet_config_notify_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
- struct net_device *dev = netdev_notifier_info_to_dev(data);
+ struct net_device *real_dev = netdev_notifier_info_to_dev(data);
- if (!dev)
+ if (!rmnet_is_real_dev_registered(real_dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
- netdev_dbg(dev, "Kernel unregister\n");
- rmnet_force_unassociate_device(dev);
+ netdev_dbg(real_dev, "Kernel unregister\n");
+ rmnet_force_unassociate_device(real_dev);
break;
default:
@@ -295,16 +286,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (!dev)
return -ENODEV;
- real_dev = __dev_get_by_index(dev_net(dev),
- nla_get_u32(tb[IFLA_LINK]));
-
- if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
+ real_dev = priv->real_dev;
+ if (!rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+ if (rmnet_get_endpoint(port, mux_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
+ return -EINVAL;
+ }
ep = rmnet_get_endpoint(port, priv->mux_id);
if (!ep)
return -ENODEV;
@@ -379,11 +372,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.fill_info = rmnet_fill_info,
};
-/* Needs either rcu_read_lock() or rtnl lock */
-struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
- return rcu_dereference_rtnl(real_dev->rx_handler_data);
+ return rcu_dereference_bh(real_dev->rx_handler_data);
else
return NULL;
}
@@ -409,7 +401,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
struct rmnet_port *port, *slave_port;
int err;
- port = rmnet_get_port(real_dev);
+ port = rmnet_get_port_rtnl(real_dev);
/* If there is more than one rmnet dev attached, it's probably being
	 * used for muxing. Skip the bridging in that case
@@ -417,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (port->nr_rmnet_devs > 1)
return -EINVAL;
+ if (port->rmnet_mode != RMNET_EPMODE_VND)
+ return -EINVAL;
+
if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
@@ -424,9 +419,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (err)
return -EBUSY;
- slave_port = rmnet_get_port(slave_dev);
+ err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
+ extack);
+ if (err) {
+ rmnet_unregister_real_device(slave_dev);
+ return err;
+ }
+
+ slave_port = rmnet_get_port_rtnl(slave_dev);
slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
slave_port->bridge_ep = real_dev;
+ slave_port->rmnet_dev = rmnet_dev;
port->rmnet_mode = RMNET_EPMODE_BRIDGE;
port->bridge_ep = slave_dev;
@@ -438,16 +441,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
int rmnet_del_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev)
{
- struct rmnet_priv *priv = netdev_priv(rmnet_dev);
- struct net_device *real_dev = priv->real_dev;
- struct rmnet_port *port, *slave_port;
+ struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
- port = rmnet_get_port(real_dev);
- port->rmnet_mode = RMNET_EPMODE_VND;
- port->bridge_ep = NULL;
-
- slave_port = rmnet_get_port(slave_dev);
- rmnet_unregister_real_device(slave_dev, slave_port);
+ rmnet_unregister_bridge(port);
netdev_dbg(slave_dev, "removed from rmnet as slave\n");
return 0;
@@ -473,8 +469,8 @@ static int __init rmnet_init(void)
static void __exit rmnet_exit(void)
{
- unregister_netdevice_notifier(&rmnet_dev_notifier);
rtnl_link_unregister(&rmnet_link_ops);
+ unregister_netdevice_notifier(&rmnet_dev_notifier);
}
module_init(rmnet_init)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index cd0a6bcbe74a..be515982d628 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -28,6 +28,7 @@ struct rmnet_port {
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
+ struct net_device *rmnet_dev;
};
extern struct rtnl_link_ops rmnet_link_ops;
@@ -65,7 +66,7 @@ struct rmnet_priv {
struct rmnet_priv_stats stats;
};
-struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 1b74bc160402..29a7bfa2584d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
+ if (skb_mac_header_was_set(skb))
+ skb_push(skb, skb->mac_len);
+
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
@@ -184,7 +187,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
return RX_HANDLER_PASS;
dev = skb->dev;
- port = rmnet_get_port(dev);
+ port = rmnet_get_port_rcu(dev);
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
@@ -217,7 +220,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
- port = rmnet_get_port(skb->dev);
+ port = rmnet_get_port_rcu(skb->dev);
if (!port)
goto drop;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 509dfc895a33..26ad40f19c64 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
return 0;
}
-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
-{
- struct rmnet_priv *priv;
-
- priv = netdev_priv(rmnet_dev);
- return priv->mux_id;
-}
-
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 54cbaf3c3bc4..14d77c709d4a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index c705743d69f7..2cc8184b7e6b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2277,7 +2277,7 @@ static int __init sxgbe_cmdline_opt(char *str)
if (!str || !*str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
- if (!strncmp(opt, "eee_timer:", 6)) {
+ if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
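
The sxgbe hunk above fixes a prefix-length bug: with a length of 6, strncmp() compared only "eee_ti", so any malformed option sharing those six characters was accepted and then parsed from offset 10. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *opt = "eee_timer:100";

	/* Comparing 6 bytes matches only the "eee_ti" prefix... */
	printf("n=6:  %d\n", strncmp(opt, "eee_timer:", 6) == 0);
	/* ...whereas the intended match is the full 10-byte prefix. */
	printf("n=10: %d\n", strncmp(opt, "eee_timer:", 10) == 0);

	/* A mistyped option also passed the 6-byte check: */
	printf("bogus, n=6:  %d\n",
	       strncmp("eee_tiXXX:100", "eee_timer:", 6) == 0);
	printf("bogus, n=10: %d\n",
	       strncmp("eee_tiXXX:100", "eee_timer:", 10) == 0);
	return 0;
}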
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 52113b7529d6..3f16bd807c6e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2853,11 +2853,24 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
}
/* Transmit timestamps are only available for 8XXX series. They result
- * in three events per packet. These occur in order, and are:
- * - the normal completion event
+ * in up to three events per packet. These occur in order, and are:
+ * - the normal completion event (may be omitted)
* - the low part of the timestamp
* - the high part of the timestamp
*
+ * It's possible for multiple completion events to appear before the
+ * corresponding timestamps. So we can, for example, get:
+ * COMP N
+ * COMP N+1
+ * TS_LO N
+ * TS_HI N
+ * TS_LO N+1
+ * TS_HI N+1
+ *
+ * In addition, it's possible for adjacent completions to be
+ * merged, so we may not see COMP N above. As such, the completion
+ * events are not very useful here.
+ *
* Each part of the timestamp is itself split across two 16 bit
* fields in the event.
*/
@@ -2865,17 +2878,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
switch (tx_ev_type) {
case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
- /* In case of Queue flush or FLR, we might have received
- * the previous TX completion event but not the Timestamp
- * events.
- */
- if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
- efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
- ESF_DZ_TX_DESCR_INDX);
- tx_queue->completed_desc_ptr =
- tx_ev_desc_ptr & tx_queue->ptr_mask;
+ /* Ignore this event - see above. */
break;
case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
@@ -2887,8 +2890,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
ts_part = efx_ef10_extract_event_ts(event);
tx_queue->completed_timestamp_major = ts_part;
- efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ efx_xmit_done_single(tx_queue);
break;
default:
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index f1bdb04efbe4..95395d67ea2d 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -20,6 +20,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
extern unsigned int efx_piobuf_size;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index aeb5e8aa2f2a..73d4e39b5b16 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -583,6 +583,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
if (tx_queue->channel)
tx_queue->channel = channel;
tx_queue->buffer = NULL;
+ tx_queue->cb_page = NULL;
memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9f9886f222c8..8164f0edcbf0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -208,8 +208,6 @@ struct efx_tx_buffer {
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
- * @completed_desc_ptr: Most recent completed pointer - only used with
- * timestamping.
* @completed_timestamp_major: Top part of the most recent tx timestamp.
* @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
@@ -269,7 +267,6 @@ struct efx_tx_queue {
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
- unsigned int completed_desc_ptr;
u32 completed_timestamp_major;
u32 completed_timestamp_minor;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index af15a737c675..59b4f16896a8 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -560,13 +560,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
u32 nic_major, u32 nic_minor,
s32 correction)
{
+ u32 sync_timestamp;
ktime_t kt = { 0 };
+ s16 delta;
if (!(nic_major & 0x80000000)) {
WARN_ON_ONCE(nic_major >> 16);
- /* Use the top bits from the latest sync event. */
- nic_major &= 0xffff;
- nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
+
+ /* Medford provides 48 bits of timestamp, so we must get the top
+ * 16 bits from the timesync event state.
+ *
+ * We only have the lower 16 bits of the time now, but we do
+ * have a full resolution timestamp from some point in the past. As
+ * long as the difference between the (real) now and the sync
+ * is less than 2^15, then we can reconstruct the difference
+ * between those two numbers using only the lower 16 bits of
+ * each.
+ *
+ * Put another way
+ *
+ * a - b = ((a mod k) - b) mod k
+ *
+ * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
+ * (a mod k) and b, so we can calculate the delta, a - b.
+ *
+ */
+ sync_timestamp = last_sync_timestamp_major(efx);
+
+ /* Because delta is s16 this does an implicit mask down to
+ * 16 bits which is what we need, assuming
+ * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
+ * we can deal with the (unlikely) case of sync timestamps
+ * arriving from the future.
+ */
+ delta = nic_major - sync_timestamp;
+
+ /* Recover the fully specified time now, by applying the offset
+ * to the (fully specified) sync time.
+ */
+ nic_major = sync_timestamp + delta;
kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
correction);
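
The comment above reconstructs a full timestamp from its low 16 bits plus a recent full-width sync value, relying on 16-bit modular arithmetic. A minimal userspace check of the same trick, written with explicit conversions instead of the driver's implicit truncation to s16:

#include <stdint.h>
#include <stdio.h>

/* Rebuild a full 32-bit seconds value from its low 16 bits, given a
 * fully specified sync timestamp taken within 2^15 seconds of it. */
static uint32_t reconstruct(uint16_t now_lo16, uint32_t sync_ts)
{
	/* (now - sync) mod 2^16, as an unsigned 16-bit value... */
	uint16_t udelta = (uint16_t)(now_lo16 - (uint16_t)sync_ts);

	/* ...reinterpreted as signed in [-2^15, 2^15), which is what
	 * the driver's implicit conversion to s16 achieves. */
	int32_t delta = udelta < 0x8000 ? udelta : (int32_t)udelta - 0x10000;

	return sync_ts + (uint32_t)delta;
}

int main(void)
{
	/* Wraps 16 bits between sync and now: expect 0x00020003 */
	printf("0x%08x\n", (unsigned)reconstruct(0x0003, 0x0001fffe));
	/* Sync timestamp slightly in the future: expect 0x0001fffe */
	printf("0x%08x\n", (unsigned)reconstruct(0xfffe, 0x00020003));
	return 0;
}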
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 04d7f41d7ed9..8aafc54a4684 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -535,6 +535,44 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
return efx_enqueue_skb(tx_queue, skb);
}
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int read_ptr;
+ bool finished = false;
+
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (!finished) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!efx_tx_buffer_in_use(buffer)) {
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_err(efx, hw, efx->net_dev,
+ "TX queue %d spurious single TX completion\n",
+ tx_queue->queue);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ /* Need to check the flag before dequeueing. */
+ if (buffer->flags & EFX_TX_BUF_SKB)
+ finished = true;
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+ efx_xmit_done_check_empty(tx_queue);
+}
+
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index b1571e9789d0..70876df1da69 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -80,7 +80,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->xmit_more_available = false;
tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
tx_queue->channel == efx_ptp_channel(efx));
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
@@ -210,10 +209,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
- if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
- unlikely(buffer->len == 0)) {
+ if (!efx_tx_buffer_in_use(buffer)) {
netif_err(efx, tx_err, efx->net_dev,
- "TX queue %d spurious TX completion id %x\n",
+ "TX queue %d spurious TX completion id %d\n",
tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
@@ -226,6 +224,19 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
}
}
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ /* Ensure that read_count is flushed. */
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
@@ -256,15 +267,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
netif_tx_wake_queue(tx_queue->core_txq);
}
- /* Check whether the hardware queue is now empty */
- if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
- if (tx_queue->read_count == tx_queue->old_write_count) {
- smp_mb();
- tx_queue->empty_read_count =
- tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
- }
- }
+ efx_xmit_done_check_empty(tx_queue);
}
/* Remove buffers put into a tx_queue for the current packet.
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index f92f1fe3a87f..99cf7ce2f36c 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -21,6 +21,12 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
unsigned int *pkts_compl,
unsigned int *bytes_compl);
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+ return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d0356fbd1e43..542784300620 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -24,6 +24,7 @@
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
@@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
* Broadcom tags can look like invalid LLC/SNAP packets and cause the
* hardware to truncate packets on reception.
*/
- if (netdev_uses_dsa(dev))
+ if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
value &= ~GMAC_CONTROL_ACS;
if (mtu > 1500)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5836b21edd7e..7da18c9afa01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4405,6 +4405,8 @@ static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ rtnl_lock();
+
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
@@ -4416,14 +4418,13 @@ static void stmmac_init_fs(struct net_device *dev)
debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
&stmmac_dma_cap_fops);
- register_netdevice_notifier(&stmmac_notifier);
+ rtnl_unlock();
}
static void stmmac_exit_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
@@ -4940,14 +4941,14 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
-#ifdef CONFIG_DEBUG_FS
- stmmac_exit_fs(ndev);
-#endif
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+#ifdef CONFIG_DEBUG_FS
+ stmmac_exit_fs(ndev);
+#endif
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
@@ -5166,6 +5167,7 @@ static int __init stmmac_init(void)
/* Create debugfs main directory if it doesn't exist yet */
if (!stmmac_fs_dir)
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+ register_netdevice_notifier(&stmmac_notifier);
#endif
return 0;
@@ -5174,6 +5176,7 @@ static int __init stmmac_init(void)
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
+ unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 276292bca334..53fb8141f1a6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -375,10 +375,14 @@ struct temac_local {
int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
+ int rx_bd_tail;
/* DMA channel control setup */
u32 tx_chnl_ctrl;
u32 rx_chnl_ctrl;
+ u8 coalesce_count_rx;
+
+ struct delayed_work restart_work;
};
/* Wrappers for temac_ior()/temac_iow() function pointers above */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 6f11f52c9a9e..9461acec6f70 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -51,6 +51,7 @@
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>
@@ -367,6 +368,8 @@ static int temac_dma_bd_init(struct net_device *ndev)
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
+ goto out;
lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
@@ -387,12 +390,13 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->tx_bd_next = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
+ lp->rx_bd_tail = RX_BD_NUM - 1;
/* Enable RX DMA transfers */
wmb();
lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
lp->dma_out(lp, RX_TAILDESC_PTR,
- lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
/* Prepare for TX DMA transfer */
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
@@ -788,6 +792,9 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);
}
+ /* Matches barrier in temac_start_xmit */
+ smp_mb();
+
netif_wake_queue(ndev);
}
@@ -830,9 +837,19 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
+ if (netif_queue_stopped(ndev))
+ return NETDEV_TX_BUSY;
+
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in temac_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (temac_check_tx_bd_space(lp, num_frag + 1))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(ndev);
}
cur_p->app0 = 0;
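
The paired smp_mb() calls above implement the usual lockless stop/wake handshake between the xmit and completion paths: each side publishes its update, issues a full barrier, then re-reads the other side's state, so a completion cannot slip in between the final space check and the queue stopping without one side noticing. A condensed userspace model using C11 atomics, with seq-cst fences standing in for smp_mb() (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int free_slots = 4;		/* free TX descriptors */
static atomic_bool queue_stopped = false;

/* Producer, modelled on temac_start_xmit() */
bool start_xmit(void)
{
	if (atomic_load(&free_slots) == 0) {
		if (atomic_load(&queue_stopped))
			return false;			/* NETDEV_TX_BUSY */

		atomic_store(&queue_stopped, true);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

		/* Space might have just been freed - check again */
		if (atomic_load(&free_slots) == 0)
			return false;

		atomic_store(&queue_stopped, false);	/* wake queue */
	}
	atomic_fetch_sub(&free_slots, 1);
	return true;
}

/* Consumer, modelled on temac_start_xmit_done() */
void xmit_done(void)
{
	atomic_fetch_add(&free_slots, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	atomic_store(&queue_stopped, false);		/* wake queue */
}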
@@ -850,12 +867,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
cur_p->len = cpu_to_be32(skb_headlen(skb));
+ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
cur_p->phys = cpu_to_be32(skb_dma_addr);
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
- lp->tx_bd_tail++;
- if (lp->tx_bd_tail >= TX_BD_NUM)
+ if (++lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -863,6 +884,27 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
+ if (--lp->tx_bd_tail < 0)
+ lp->tx_bd_tail = TX_BD_NUM - 1;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ while (--ii >= 0) {
+ --frag;
+ dma_unmap_single(ndev->dev.parent,
+ be32_to_cpu(cur_p->phys),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (--lp->tx_bd_tail < 0)
+ lp->tx_bd_tail = TX_BD_NUM - 1;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ }
+ dma_unmap_single(ndev->dev.parent,
+ be32_to_cpu(cur_p->phys),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
cur_p->phys = cpu_to_be32(skb_dma_addr);
cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
@@ -884,31 +926,56 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+static int ll_temac_recv_buffers_available(struct temac_local *lp)
+{
+ int available;
+
+ if (!lp->rx_skb[lp->rx_bd_ci])
+ return 0;
+ available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
+ if (available <= 0)
+ available += RX_BD_NUM;
+ return available;
+}
static void ll_temac_recv(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- struct sk_buff *skb, *new_skb;
- unsigned int bdstat;
- struct cdmac_bd *cur_p;
- dma_addr_t tail_p, skb_dma_addr;
- int length;
unsigned long flags;
+ int rx_bd;
+ bool update_tail = false;
spin_lock_irqsave(&lp->rx_lock, flags);
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-
- bdstat = be32_to_cpu(cur_p->app0);
- while ((bdstat & STS_CTRL_APP0_CMPLT)) {
+ /* Process all received buffers, passing them on to the network
+ * stack. After this, the buffer descriptors will be in an
+ * un-allocated state, with no skb allocated for them, and they
+ * are therefore not available for TEMAC/DMA.
+ */
+ do {
+ struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
+ struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
+ unsigned int bdstat = be32_to_cpu(bd->app0);
+ int length;
+
+ /* While this should not normally happen, we can end up
+ * here when GFP_ATOMIC allocations fail, and we therefore
+ * have un-allocated buffers.
+ */
+ if (!skb)
+ break;
- skb = lp->rx_skb[lp->rx_bd_ci];
- length = be32_to_cpu(cur_p->app4) & 0x3FFF;
+ /* Stop at the first buffer descriptor not yet completed */
+ if (!(bdstat & STS_CTRL_APP0_CMPLT))
+ break;
- dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ /* The buffer is not valid for DMA anymore */
+ bd->phys = 0;
+ bd->len = 0;
+ length = be32_to_cpu(bd->app4) & 0x3FFF;
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
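
The new ll_temac_recv_buffers_available() helper above counts how many descriptors in the circular RX ring are still available to the DMA engine: rx_bd_ci is the next descriptor to process and rx_bd_tail the last one allocated, so the inclusive distance between them wraps modulo RX_BD_NUM. A small standalone check of that arithmetic (omitting the driver's empty-ring test via the NULL skb at rx_bd_ci):

#include <assert.h>
#include <stdio.h>

#define RX_BD_NUM 64

/* Descriptors available to DMA, from consumer index ci (next to
 * process) through tail (last allocated), inclusive. */
static int buffers_available(int ci, int tail)
{
	int available = 1 + tail - ci;

	if (available <= 0)
		available += RX_BD_NUM;
	return available;
}

int main(void)
{
	assert(buffers_available(0, RX_BD_NUM - 1) == RX_BD_NUM); /* full */
	assert(buffers_available(10, 9) == RX_BD_NUM);	/* full, wrapped */
	assert(buffers_available(10, 12) == 3);		/* partial */
	assert(buffers_available(60, 1) == 6);		/* wrapped */
	printf("ok\n");
	return 0;
}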
@@ -923,43 +990,102 @@ static void ll_temac_recv(struct net_device *ndev)
* (back) for proper IP checksum byte order
* (be16).
*/
- skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
+ skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
+ /* The skb buffer is now owned by network stack above */
+ lp->rx_skb[lp->rx_bd_ci] = NULL;
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
- new_skb = netdev_alloc_skb_ip_align(ndev,
- XTE_MAX_JUMBO_FRAME_SIZE);
- if (!new_skb) {
- spin_unlock_irqrestore(&lp->rx_lock, flags);
- return;
+ rx_bd = lp->rx_bd_ci;
+ if (++lp->rx_bd_ci >= RX_BD_NUM)
+ lp->rx_bd_ci = 0;
+ } while (rx_bd != lp->rx_bd_tail);
+
+ /* DMA operations will halt when the last buffer descriptor is
+ * processed (ie. the one pointed to by RX_TAILDESC_PTR).
+ * When that happens, no more interrupt events will be
+ * generated. No IRQ_COAL or IRQ_DLY, and not even an
+ * IRQ_ERR. To avoid stalling, we schedule delayed work
+ * when there is a potential risk of that happening. The work
+ * will call this function, and thus re-schedule itself until
+ * enough buffers are available again.
+ */
+ if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
+ schedule_delayed_work(&lp->restart_work, HZ / 1000);
+
+ /* Allocate new buffers for those buffer descriptors that were
+ * passed to the network stack. Note that GFP_ATOMIC allocations
+ * can fail (e.g. when a larger burst of GFP_ATOMIC
+ * allocations occurs), so while we try to allocate all
+ * buffers in the same interrupt where they were processed, we
+ * continue with what we could get in case of allocation
+ * failure. Allocation of remaining buffers will be retried
+ * in following calls.
+ */
+ while (1) {
+ struct sk_buff *skb;
+ struct cdmac_bd *bd;
+ dma_addr_t skb_dma_addr;
+
+ rx_bd = lp->rx_bd_tail + 1;
+ if (rx_bd >= RX_BD_NUM)
+ rx_bd = 0;
+ bd = &lp->rx_bd_v[rx_bd];
+
+ if (bd->phys)
+ break; /* All skb's allocated */
+
+ skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
+ if (!skb) {
+ dev_warn(&ndev->dev, "skb alloc failed\n");
+ break;
}
- cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
- skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
- cur_p->phys = cpu_to_be32(skb_dma_addr);
- cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
- lp->rx_skb[lp->rx_bd_ci] = new_skb;
+ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
+ skb_dma_addr))) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
- lp->rx_bd_ci++;
- if (lp->rx_bd_ci >= RX_BD_NUM)
- lp->rx_bd_ci = 0;
+ bd->phys = cpu_to_be32(skb_dma_addr);
+ bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+ bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
+ lp->rx_skb[rx_bd] = skb;
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- bdstat = be32_to_cpu(cur_p->app0);
+ lp->rx_bd_tail = rx_bd;
+ update_tail = true;
+ }
+
+ /* Move tail pointer when buffers have been allocated */
+ if (update_tail) {
+ lp->dma_out(lp, RX_TAILDESC_PTR,
+ lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
}
- lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
+/* Function scheduled to ensure a restart in case of DMA halt
+ * condition caused by running out of buffer descriptors.
+ */
+static void ll_temac_restart_work_func(struct work_struct *work)
+{
+ struct temac_local *lp = container_of(work, struct temac_local,
+ restart_work.work);
+ struct net_device *ndev = lp->ndev;
+
+ ll_temac_recv(ndev);
+}
+
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
struct net_device *ndev = _ndev;
@@ -1052,6 +1178,8 @@ static int temac_stop(struct net_device *ndev)
dev_dbg(&ndev->dev, "temac_close()\n");
+ cancel_delayed_work_sync(&lp->restart_work);
+
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
@@ -1173,6 +1301,7 @@ static int temac_probe(struct platform_device *pdev)
lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
spin_lock_init(&lp->rx_lock);
+ INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
/* Setup mutex for synchronization of indirect register access */
if (pdata) {
@@ -1279,6 +1408,7 @@ static int temac_probe(struct platform_device *pdev)
*/
lp->tx_chnl_ctrl = 0x10220000;
lp->rx_chnl_ctrl = 0xff070000;
+ lp->coalesce_count_rx = 0x07;
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
@@ -1310,11 +1440,14 @@ static int temac_probe(struct platform_device *pdev)
(pdata->tx_irq_count << 16);
else
lp->tx_chnl_ctrl = 0x10220000;
- if (pdata->rx_irq_timeout || pdata->rx_irq_count)
+ if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
(pdata->rx_irq_count << 16);
- else
+ lp->coalesce_count_rx = pdata->rx_irq_count;
+ } else {
lp->rx_chnl_ctrl = 0xff070000;
+ lp->coalesce_count_rx = 0x07;
+ }
}
/* Error handle returned DMA RX and TX interrupts */