author		Sabrina Dubroca <sd@queasysnail.net>	2025-08-29 20:55:40 +0200
committer	Jakub Kicinski <kuba@kernel.org>	2025-09-01 13:31:33 -0700
commit		030e1c45666629f72d0fc1d040f9d2915680de8e (patch)
tree		39a9e738d8a8c460f9d8d3be7ee114dee370a5f0
parent		net: macb: Fix tx_ptr_lock locking (diff)
macsec: read MACSEC_SA_ATTR_PN with nla_get_uint
The code currently reads both U32 and U64 attributes as U64, so when a U32 attribute is provided by userspace (i.e., when not using XPN), on big endian systems we load that value into the upper 32 bits of the next_pn field instead of the lower 32 bits. The value userspace provided is therefore ignored (only the lower 32 bits matter for non-XPN), and we start using PNs from 0.

Switch to nla_get_uint, which reads the value correctly on all architectures, whether the attribute is 32 or 64 bits wide.

Fixes: 48ef50fa866a ("macsec: Netlink support of XPN cipher suites (IEEE 802.1AEbw)")
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/1c1df1661b89238caf5beefb84a10ebfd56c66ea.1756459839.git.sd@queasysnail.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
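For illustration, a minimal userspace sketch (not kernel code; all names below are invented for the demo) of why reading a 4-byte PN attribute through a 64-bit accessor misplaces the value on big endian:

/*
 * Standalone demo of the bug described above: a 4-byte attribute
 * payload interpreted as a 64-bit value. On a big-endian host the
 * four real bytes end up in the upper half of the u64, so the lower
 * 32 bits (the ones non-XPN MACsec uses) come back as 0.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t payload[8] = { 0 };	/* attribute payload area, zero padded */
	uint32_t pn32 = 0x12345678;	/* PN that userspace sent as a U32 attribute */

	memcpy(payload, &pn32, sizeof(pn32));		/* only 4 bytes are supplied */

	uint64_t as_u64;
	memcpy(&as_u64, payload, sizeof(as_u64));	/* roughly what the u64 accessor did */

	uint32_t lower = (uint32_t)as_u64;		/* what non-XPN MACsec consumes */

	/*
	 * Little endian: lower == 0x12345678 by accident.
	 * Big endian: lower == 0, so the PN silently restarts from 0.
	 */
	printf("u64 view: 0x%016llx  lower 32 bits: 0x%08x\n",
	       (unsigned long long)as_u64, (unsigned int)lower);
	return 0;
}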
-rw-r--r--	drivers/net/macsec.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 4c75d1fea552..01329fe7451a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1844,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 
 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
 		spin_lock_bh(&rx_sa->lock);
-		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+		rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
 		spin_unlock_bh(&rx_sa->lock);
 	}
 
@@ -2086,7 +2086,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	spin_lock_bh(&tx_sa->lock);
-	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+	tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
 	spin_unlock_bh(&tx_sa->lock);
 
 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2398,7 +2398,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 
 		spin_lock_bh(&tx_sa->lock);
 		prev_pn = tx_sa->next_pn_halves;
-		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+		tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
 		spin_unlock_bh(&tx_sa->lock);
 	}
 
@@ -2496,7 +2496,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 
 		spin_lock_bh(&rx_sa->lock);
 		prev_pn = rx_sa->next_pn_halves;
-		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+		rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
 		spin_unlock_bh(&rx_sa->lock);
 	}
 