author     Emeel Hakim <ehakim@nvidia.com>       2022-09-21 11:10:45 -0700
committer  Jakub Kicinski <kuba@kernel.org>      2022-09-22 18:01:31 -0700
commit     0a6e9b718dbbdeb6e9f56f2f79e789f6833ea804 (patch)
tree       6a631e777952ec50d52e491b8bbf586903ed83a4 /drivers/net/macsec.c
parent     Merge branch 'cleanup-in-huawei-hinic-driver' (diff)
net: macsec: Expose extended packet number (EPN) properties to macsec offload
Currently macsec invokes the HW offload path before reading the extended
packet number (EPN) related user properties, i.e. the salt and the short
secure channel identifier (SSCI), which prevents macsec EPN HW offload.
Expose those properties to the offload path by moving the macsec EPN
property reading prior to the HW offload path.

Signed-off-by: Emeel Hakim <ehakim@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
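For context on why the ordering matters: when offload is active, macsec hands the software SA object to the device driver, so the SSCI and salt have to be populated before that call or the hardware is programmed with zeroed EPN state. The standalone C sketch below models that dependency only; the names (toy_rx_sa, toy_offload_add_rxsa, TOY_SALT_LEN) are made up for illustration and are not the kernel's data structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_SALT_LEN 12		/* XPN salt length in bytes (illustrative) */

/* Toy stand-in for the software rx SA object; not the kernel's struct. */
struct toy_rx_sa {
	uint32_t ssci;
	uint8_t salt[TOY_SALT_LEN];
};

/* Toy stand-in for a driver offload hook: it can only program the
 * hardware with whatever is already present in the SA at call time. */
static int toy_offload_add_rxsa(const struct toy_rx_sa *sa)
{
	printf("offload sees ssci=0x%08x, salt[0]=0x%02x\n",
	       (unsigned int)sa->ssci, (unsigned int)sa->salt[0]);
	return 0;
}

int main(void)
{
	struct toy_rx_sa sa = { 0 };
	const uint8_t user_salt[TOY_SALT_LEN] = { 0xab, 0xcd };

	/* Post-patch ordering: populate the EPN properties first ... */
	sa.ssci = 0x01020304u;
	memcpy(sa.salt, user_salt, TOY_SALT_LEN);

	/* ... and only then invoke the offload path, so the driver sees
	 * valid SSCI/salt rather than zero-initialized fields. */
	return toy_offload_add_rxsa(&sa);
}

The same principle drives the patch below: the offload callbacks receive the SA through the offload context, so the secy->xpn block is moved above the macsec_is_offloaded() branch in both the rx and tx add paths.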
Diffstat (limited to 'drivers/net/macsec.c')
-rw-r--r--  drivers/net/macsec.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 830fed3914b6..617f850bdb3a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1828,6 +1828,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 	rx_sa->sc = rx_sc;
+	if (secy->xpn) {
+		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+			   MACSEC_SALT_LEN);
+	}
+
 	/* If h/w offloading is available, propagate to the device */
 	if (macsec_is_offloaded(netdev_priv(dev))) {
 		const struct macsec_ops *ops;
@@ -1850,12 +1856,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 			goto cleanup;
 	}
-	if (secy->xpn) {
-		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
-		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
-			   MACSEC_SALT_LEN);
-	}
-
 	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@@ -2070,6 +2070,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
 		secy->operational = true;
+	if (secy->xpn) {
+		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+			   MACSEC_SALT_LEN);
+	}
+
 	/* If h/w offloading is available, propagate to the device */
 	if (macsec_is_offloaded(netdev_priv(dev))) {
 		const struct macsec_ops *ops;
@@ -2092,12 +2098,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 			goto cleanup;
 	}
-	if (secy->xpn) {
-		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
-		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
-			   MACSEC_SALT_LEN);
-	}
-
 	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
 	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);