path: root/net/ethtool/phy.c
author    Jakub Kicinski <kuba@kernel.org>  2025-03-05 08:37:28 -0800
committer Jakub Kicinski <kuba@kernel.org>  2025-03-06 12:59:44 -0800
commit    2bcf4772e45adb00649a4e9cbff14b08a144f9e3
tree      3f9472a249e9d7ee3141be0bba0b8c53bf3a6b82 /net/ethtool/phy.c
parent    net: hold netdev instance lock during ndo_bpf
net: ethtool: try to protect all callback with netdev instance lock
Protect all ethtool callbacks and PHY related state with the netdev instance lock, for drivers which want / need to have their ops instance-locked. Basically take the lock everywhere we take rtnl_lock.

It was tempting to take the lock in ethnl_ops_begin(), but turns out we actually nest those calls (when generating notifications).

Tested-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Cc: Saeed Mahameed <saeed@kernel.org>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250305163732.2766420-11-sdf@fomichev.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
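The locking pattern the diff below applies is sketched here in isolation. ethnl_example_doit() and do_ethtool_op() are hypothetical placeholders; only rtnl_lock()/rtnl_unlock() and netdev_lock_ops()/netdev_unlock_ops() are the real helpers the patch pairs up:

/*
 * Minimal sketch of the pattern (not the actual handler): the netdev
 * instance lock is taken right after rtnl_lock() and released right
 * before rtnl_unlock(), on success and error paths alike.
 */
static int ethnl_example_doit(struct net_device *dev)
{
	int ret;

	rtnl_lock();
	netdev_lock_ops(dev);		/* instance lock, nested inside RTNL */

	ret = do_ethtool_op(dev);	/* hypothetical driver-facing work */

	netdev_unlock_ops(dev);		/* release in reverse order */
	rtnl_unlock();
	return ret;
}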
Diffstat (limited to 'net/ethtool/phy.c')
 net/ethtool/phy.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c
index ed8f690f6bac..2b428bc80c9b 100644
--- a/net/ethtool/phy.c
+++ b/net/ethtool/phy.c
@@ -158,18 +158,19 @@ int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
 		return ret;
 	rtnl_lock();
+	netdev_lock_ops(req_info.base.dev);
 	ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
 	if (ret < 0)
-		goto err_unlock_rtnl;
+		goto err_unlock;
 	/* No PHY, return early */
 	if (!req_info.pdn)
-		goto err_unlock_rtnl;
+		goto err_unlock;
 	ret = ethnl_phy_reply_size(&req_info.base, info->extack);
 	if (ret < 0)
-		goto err_unlock_rtnl;
+		goto err_unlock;
 	reply_len = ret + ethnl_reply_header_size();
 	rskb = ethnl_reply_init(reply_len, req_info.base.dev,
@@ -178,13 +179,14 @@ int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
 				info, &reply_payload);
 	if (!rskb) {
 		ret = -ENOMEM;
-		goto err_unlock_rtnl;
+		goto err_unlock;
 	}
 	ret = ethnl_phy_fill_reply(&req_info.base, rskb);
 	if (ret)
 		goto err_free_msg;
+	netdev_unlock_ops(req_info.base.dev);
 	rtnl_unlock();
 	ethnl_parse_header_dev_put(&req_info.base);
 	genlmsg_end(rskb, reply_payload);
@@ -193,7 +195,8 @@ int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
 err_free_msg:
 	nlmsg_free(rskb);
-err_unlock_rtnl:
+err_unlock:
+	netdev_unlock_ops(req_info.base.dev);
 	rtnl_unlock();
 	ethnl_parse_header_dev_put(&req_info.base);
 	return ret;
@@ -290,10 +293,15 @@ int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 	rtnl_lock();
 	if (ctx->phy_req_info->base.dev) {
-		ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
+		dev = ctx->phy_req_info->base.dev;
+		netdev_lock_ops(dev);
+		ret = ethnl_phy_dump_one_dev(skb, dev, cb);
+		netdev_unlock_ops(dev);
 	} else {
 		for_each_netdev_dump(net, dev, ctx->ifindex) {
+			netdev_lock_ops(dev);
 			ret = ethnl_phy_dump_one_dev(skb, dev, cb);
+			netdev_unlock_ops(dev);
 			if (ret)
 				break;