Diffstat (limited to 'net')
-rw-r--r--  net/6lowpan/nhc.c | 103
-rw-r--r--  net/6lowpan/nhc.h | 38
-rw-r--r--  net/6lowpan/nhc_dest.c | 9
-rw-r--r--  net/6lowpan/nhc_fragment.c | 9
-rw-r--r--  net/6lowpan/nhc_ghc_ext_dest.c | 9
-rw-r--r--  net/6lowpan/nhc_ghc_ext_frag.c | 11
-rw-r--r--  net/6lowpan/nhc_ghc_ext_hop.c | 9
-rw-r--r--  net/6lowpan/nhc_ghc_ext_route.c | 9
-rw-r--r--  net/6lowpan/nhc_ghc_icmpv6.c | 9
-rw-r--r--  net/6lowpan/nhc_ghc_udp.c | 9
-rw-r--r--  net/6lowpan/nhc_hop.c | 9
-rw-r--r--  net/6lowpan/nhc_ipv6.c | 11
-rw-r--r--  net/6lowpan/nhc_mobility.c | 9
-rw-r--r--  net/6lowpan/nhc_routing.c | 9
-rw-r--r--  net/6lowpan/nhc_udp.c | 9
-rw-r--r--  net/8021q/vlan_core.c | 6
-rw-r--r--  net/8021q/vlan_dev.c | 22
-rw-r--r--  net/ax25/af_ax25.c | 7
-rw-r--r--  net/ax25/ax25_dev.c | 9
-rw-r--r--  net/bpf/test_run.c | 6
-rw-r--r--  net/bridge/br_if.c | 10
-rw-r--r--  net/bridge/br_mdb.c | 15
-rw-r--r--  net/bridge/br_netlink.c | 8
-rw-r--r--  net/bridge/br_vlan.c | 36
-rw-r--r--  net/bridge/netfilter/nft_meta_bridge.c | 2
-rw-r--r--  net/can/Kconfig | 5
-rw-r--r--  net/compat.c | 1
-rw-r--r--  net/core/.gitignore | 1
-rw-r--r--  net/core/Makefile | 23
-rw-r--r--  net/core/datagram.c | 17
-rw-r--r--  net/core/dev.c | 48
-rw-r--r--  net/core/dev_ioctl.c | 4
-rw-r--r--  net/core/devlink.c | 927
-rw-r--r--  net/core/drop_monitor.c | 36
-rw-r--r--  net/core/dst.c | 8
-rw-r--r--  net/core/failover.c | 4
-rw-r--r--  net/core/filter.c | 195
-rw-r--r--  net/core/flow_offload.c | 7
-rw-r--r--  net/core/link_watch.c | 2
-rw-r--r--  net/core/neighbour.c | 50
-rw-r--r--  net/core/net-sysfs.c | 8
-rw-r--r--  net/core/netpoll.c | 2
-rw-r--r--  net/core/page_pool.c | 3
-rw-r--r--  net/core/pktgen.c | 6
-rw-r--r--  net/core/skbuff.c | 68
-rw-r--r--  net/core/skmsg.c | 49
-rw-r--r--  net/core/sock.c | 34
-rw-r--r--  net/core/sock_map.c | 23
-rw-r--r--  net/core/stream.c | 6
-rw-r--r--  net/decnet/af_decnet.c | 4
-rw-r--r--  net/decnet/dn_neigh.c | 1
-rw-r--r--  net/dsa/Kconfig | 11
-rw-r--r--  net/dsa/Makefile | 1
-rw-r--r--  net/dsa/slave.c | 37
-rw-r--r--  net/dsa/tag_ksz.c | 59
-rw-r--r--  net/dsa/tag_rzn1_a5psw.c | 113
-rw-r--r--  net/ethtool/ioctl.c | 21
-rw-r--r--  net/ethtool/netlink.c | 6
-rw-r--r--  net/ethtool/netlink.h | 2
-rw-r--r--  net/ipv4/af_inet.c | 13
-rw-r--r--  net/ipv4/arp.c | 25
-rw-r--r--  net/ipv4/bpf_tcp_ca.c | 39
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/esp4.c | 4
-rw-r--r--  net/ipv4/fib_semantics.c | 11
-rw-r--r--  net/ipv4/ip_output.c | 60
-rw-r--r--  net/ipv4/ip_tunnel.c | 21
-rw-r--r--  net/ipv4/ipconfig.c | 8
-rw-r--r--  net/ipv4/ipmr.c | 217
-rw-r--r--  net/ipv4/ipmr_base.c | 53
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c | 42
-rw-r--r--  net/ipv4/ping.c | 36
-rw-r--r--  net/ipv4/raw.c | 172
-rw-r--r--  net/ipv4/raw_diag.c | 57
-rw-r--r--  net/ipv4/route.c | 41
-rw-r--r--  net/ipv4/tcp.c | 144
-rw-r--r--  net/ipv4/tcp_bpf.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 9
-rw-r--r--  net/ipv4/tcp_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp_output.c | 32
-rw-r--r--  net/ipv4/tcp_timer.c | 19
-rw-r--r--  net/ipv4/udp.c | 25
-rw-r--r--  net/ipv4/udplite.c | 3
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 68
-rw-r--r--  net/ipv6/addrconf_core.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 6
-rw-r--r--  net/ipv6/ip6_gre.c | 51
-rw-r--r--  net/ipv6/ip6_output.c | 49
-rw-r--r--  net/ipv6/ip6_tunnel.c | 22
-rw-r--r--  net/ipv6/ip6_vti.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 297
-rw-r--r--  net/ipv6/ndisc.c | 30
-rw-r--r--  net/ipv6/raw.c | 120
-rw-r--r--  net/ipv6/route.c | 12
-rw-r--r--  net/ipv6/sit.c | 12
-rw-r--r--  net/ipv6/tcp_ipv6.c | 8
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/ipv6/udplite.c | 3
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 6
-rw-r--r--  net/l2tp/l2tp_ppp.c | 2
-rw-r--r--  net/llc/af_llc.c | 2
-rw-r--r--  net/mac80211/agg-rx.c | 4
-rw-r--r--  net/mac80211/agg-tx.c | 2
-rw-r--r--  net/mac80211/airtime.c | 4
-rw-r--r--  net/mac80211/cfg.c | 621
-rw-r--r--  net/mac80211/chan.c | 660
-rw-r--r--  net/mac80211/debug.h | 14
-rw-r--r--  net/mac80211/debugfs.c | 101
-rw-r--r--  net/mac80211/debugfs_key.c | 10
-rw-r--r--  net/mac80211/debugfs_netdev.c | 52
-rw-r--r--  net/mac80211/debugfs_sta.c | 24
-rw-r--r--  net/mac80211/driver-ops.h | 102
-rw-r--r--  net/mac80211/eht.c | 9
-rw-r--r--  net/mac80211/ethtool.c | 26
-rw-r--r--  net/mac80211/he.c | 17
-rw-r--r--  net/mac80211/ht.c | 41
-rw-r--r--  net/mac80211/ibss.c | 65
-rw-r--r--  net/mac80211/ieee80211_i.h | 489
-rw-r--r--  net/mac80211/iface.c | 254
-rw-r--r--  net/mac80211/key.c | 78
-rw-r--r--  net/mac80211/key.h | 9
-rw-r--r--  net/mac80211/main.c | 227
-rw-r--r--  net/mac80211/mesh.c | 20
-rw-r--r--  net/mac80211/mesh_hwmp.c | 6
-rw-r--r--  net/mac80211/mesh_plink.c | 19
-rw-r--r--  net/mac80211/mlme.c | 440
-rw-r--r--  net/mac80211/ocb.c | 15
-rw-r--r--  net/mac80211/offchannel.c | 22
-rw-r--r--  net/mac80211/rate.c | 19
-rw-r--r--  net/mac80211/rate.h | 8
-rw-r--r--  net/mac80211/rx.c | 98
-rw-r--r--  net/mac80211/scan.c | 2
-rw-r--r--  net/mac80211/sta_info.c | 391
-rw-r--r--  net/mac80211/sta_info.h | 46
-rw-r--r--  net/mac80211/status.c | 43
-rw-r--r--  net/mac80211/tdls.c | 31
-rw-r--r--  net/mac80211/trace.h | 1156
-rw-r--r--  net/mac80211/tx.c | 743
-rw-r--r--  net/mac80211/util.c | 152
-rw-r--r--  net/mac80211/vht.c | 177
-rw-r--r--  net/mac80211/wme.c | 3
-rw-r--r--  net/mac80211/wpa.c | 133
-rw-r--r--  net/mac80211/wpa.h | 5
-rw-r--r--  net/mptcp/pm_netlink.c | 131
-rw-r--r--  net/mptcp/protocol.c | 60
-rw-r--r--  net/mptcp/protocol.h | 3
-rw-r--r--  net/mptcp/subflow.c | 2
-rw-r--r--  net/netfilter/Kconfig | 9
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_mh.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_broadcast.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 260
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_timeout.c | 18
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 73
-rw-r--r--  net/netfilter/nf_flow_table_offload.c | 17
-rw-r--r--  net/netfilter/nf_flow_table_procfs.c | 80
-rw-r--r--  net/netfilter/nfnetlink.c | 2
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 10
-rw-r--r--  net/netfilter/nft_byteorder.c | 3
-rw-r--r--  net/netfilter/nft_cmp.c | 18
-rw-r--r--  net/netfilter/nft_ct.c | 4
-rw-r--r--  net/netfilter/nft_exthdr.c | 10
-rw-r--r--  net/netfilter/nft_osf.c | 2
-rw-r--r--  net/netfilter/nft_set_bitmap.c | 4
-rw-r--r--  net/netfilter/nft_socket.c | 8
-rw-r--r--  net/netfilter/nft_tproxy.c | 6
-rw-r--r--  net/netfilter/nft_tunnel.c | 3
-rw-r--r--  net/netfilter/nft_xfrm.c | 8
-rw-r--r--  net/netfilter/xt_CT.c | 23
-rw-r--r--  net/netfilter/xt_DSCP.c | 8
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 4
-rw-r--r--  net/netfilter/xt_TPROXY.c | 25
-rw-r--r--  net/netfilter/xt_connlimit.c | 6
-rw-r--r--  net/openvswitch/vport-netdev.c | 6
-rw-r--r--  net/packet/af_packet.c | 12
-rw-r--r--  net/rxrpc/rxkad.c | 2
-rw-r--r--  net/sched/act_ct.c | 5
-rw-r--r--  net/sched/act_mirred.c | 6
-rw-r--r--  net/sched/cls_api.c | 5
-rw-r--r--  net/sched/cls_flower.c | 8
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_cbq.c | 79
-rw-r--r--  net/sched/sch_generic.c | 11
-rw-r--r--  net/sched/sch_taprio.c | 5
-rw-r--r--  net/sctp/protocol.c | 4
-rw-r--r--  net/sctp/sm_statefuns.c | 2
-rw-r--r--  net/sctp/socket.c | 12
-rw-r--r--  net/sctp/stream_interleave.c | 2
-rw-r--r--  net/sctp/ulpqueue.c | 4
-rw-r--r--  net/smc/af_smc.c | 68
-rw-r--r--  net/smc/smc_clc.c | 8
-rw-r--r--  net/smc/smc_clc.h | 2
-rw-r--r--  net/smc/smc_core.c | 246
-rw-r--r--  net/smc/smc_core.h | 20
-rw-r--r--  net/smc/smc_ib.c | 44
-rw-r--r--  net/smc/smc_ib.h | 2
-rw-r--r--  net/smc/smc_llc.c | 33
-rw-r--r--  net/smc/smc_pnet.c | 7
-rw-r--r--  net/smc/smc_rx.c | 92
-rw-r--r--  net/smc/smc_sysctl.c | 11
-rw-r--r--  net/smc/smc_tx.c | 10
-rw-r--r--  net/socket.c | 17
-rw-r--r--  net/strparser/strparser.c | 3
-rw-r--r--  net/switchdev/switchdev.c | 4
-rw-r--r--  net/tipc/bearer.c | 4
-rw-r--r--  net/tipc/name_table.c | 11
-rw-r--r--  net/tipc/name_table.h | 1
-rw-r--r--  net/tls/Makefile | 2
-rw-r--r--  net/tls/tls.h | 298
-rw-r--r--  net/tls/tls_device.c | 28
-rw-r--r--  net/tls/tls_device_fallback.c | 8
-rw-r--r--  net/tls/tls_main.c | 97
-rw-r--r--  net/tls/tls_proc.c | 4
-rw-r--r--  net/tls/tls_strp.c | 17
-rw-r--r--  net/tls/tls_sw.c | 668
-rw-r--r--  net/tls/tls_toe.c | 2
-rw-r--r--  net/unix/af_unix.c | 294
-rw-r--r--  net/unix/diag.c | 49
-rw-r--r--  net/unix/sysctl_net_unix.c | 19
-rw-r--r--  net/wireless/ap.c | 46
-rw-r--r--  net/wireless/chan.c | 206
-rw-r--r--  net/wireless/core.c | 34
-rw-r--r--  net/wireless/core.h | 24
-rw-r--r--  net/wireless/ibss.c | 57
-rw-r--r--  net/wireless/mesh.c | 31
-rw-r--r--  net/wireless/mlme.c | 163
-rw-r--r--  net/wireless/nl80211.c | 1022
-rw-r--r--  net/wireless/ocb.c | 5
-rw-r--r--  net/wireless/rdev-ops.h | 58
-rw-r--r--  net/wireless/reg.c | 139
-rw-r--r--  net/wireless/scan.c | 8
-rw-r--r--  net/wireless/sme.c | 512
-rw-r--r--  net/wireless/trace.h | 378
-rw-r--r--  net/wireless/util.c | 44
-rw-r--r--  net/wireless/wext-compat.c | 48
-rw-r--r--  net/wireless/wext-sme.c | 29
-rw-r--r--  net/xdp/xdp_umem.c | 6
-rw-r--r--  net/xfrm/xfrm_device.c | 2
246 files changed, 9598 insertions, 6538 deletions
diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
index d6bbbd4ab38b..7b374595328d 100644
--- a/net/6lowpan/nhc.c
+++ b/net/6lowpan/nhc.c
@@ -12,77 +12,26 @@
#include "nhc.h"
-static struct rb_root rb_root = RB_ROOT;
-static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
+static const struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
static DEFINE_SPINLOCK(lowpan_nhc_lock);
-static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
+static const struct lowpan_nhc *lowpan_nhc_by_nhcid(struct sk_buff *skb)
{
- struct rb_node **new = &rb_root.rb_node, *parent = NULL;
-
- /* Figure out where to put new node */
- while (*new) {
- struct lowpan_nhc *this = rb_entry(*new, struct lowpan_nhc,
- node);
- int result, len_dif, len;
-
- len_dif = nhc->idlen - this->idlen;
-
- if (nhc->idlen < this->idlen)
- len = nhc->idlen;
- else
- len = this->idlen;
-
- result = memcmp(nhc->id, this->id, len);
- if (!result)
- result = len_dif;
-
- parent = *new;
- if (result < 0)
- new = &((*new)->rb_left);
- else if (result > 0)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
+ const struct lowpan_nhc *nhc;
+ int i;
+ u8 id;
- /* Add new node and rebalance tree. */
- rb_link_node(&nhc->node, parent, new);
- rb_insert_color(&nhc->node, &rb_root);
+ if (!pskb_may_pull(skb, 1))
+ return NULL;
- return 0;
-}
+ id = *skb->data;
-static void lowpan_nhc_remove(struct lowpan_nhc *nhc)
-{
- rb_erase(&nhc->node, &rb_root);
-}
+ for (i = 0; i < NEXTHDR_MAX + 1; i++) {
+ nhc = lowpan_nexthdr_nhcs[i];
+ if (!nhc)
+ continue;
-static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
-{
- struct rb_node *node = rb_root.rb_node;
- const u8 *nhcid_skb_ptr = skb->data;
-
- while (node) {
- struct lowpan_nhc *nhc = rb_entry(node, struct lowpan_nhc,
- node);
- u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN];
- int result, i;
-
- if (nhcid_skb_ptr + nhc->idlen > skb->data + skb->len)
- return NULL;
-
- /* copy and mask afterwards the nhid value from skb */
- memcpy(nhcid_skb_ptr_masked, nhcid_skb_ptr, nhc->idlen);
- for (i = 0; i < nhc->idlen; i++)
- nhcid_skb_ptr_masked[i] &= nhc->idmask[i];
-
- result = memcmp(nhcid_skb_ptr_masked, nhc->id, nhc->idlen);
- if (result < 0)
- node = node->rb_left;
- else if (result > 0)
- node = node->rb_right;
- else
+ if ((id & nhc->idmask) == nhc->id)
return nhc;
}
@@ -92,7 +41,7 @@ static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
int lowpan_nhc_check_compression(struct sk_buff *skb,
const struct ipv6hdr *hdr, u8 **hc_ptr)
{
- struct lowpan_nhc *nhc;
+ const struct lowpan_nhc *nhc;
int ret = 0;
spin_lock_bh(&lowpan_nhc_lock);
@@ -110,7 +59,7 @@ int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
u8 **hc_ptr)
{
int ret;
- struct lowpan_nhc *nhc;
+ const struct lowpan_nhc *nhc;
spin_lock_bh(&lowpan_nhc_lock);
@@ -153,7 +102,7 @@ int lowpan_nhc_do_uncompression(struct sk_buff *skb,
const struct net_device *dev,
struct ipv6hdr *hdr)
{
- struct lowpan_nhc *nhc;
+ const struct lowpan_nhc *nhc;
int ret;
spin_lock_bh(&lowpan_nhc_lock);
@@ -189,18 +138,9 @@ int lowpan_nhc_do_uncompression(struct sk_buff *skb,
return 0;
}
-int lowpan_nhc_add(struct lowpan_nhc *nhc)
+int lowpan_nhc_add(const struct lowpan_nhc *nhc)
{
- int ret;
-
- if (!nhc->idlen || !nhc->idsetup)
- return -EINVAL;
-
- WARN_ONCE(nhc->idlen > LOWPAN_NHC_MAX_ID_LEN,
- "LOWPAN_NHC_MAX_ID_LEN should be updated to %zd.\n",
- nhc->idlen);
-
- nhc->idsetup(nhc);
+ int ret = 0;
spin_lock_bh(&lowpan_nhc_lock);
@@ -209,10 +149,6 @@ int lowpan_nhc_add(struct lowpan_nhc *nhc)
goto out;
}
- ret = lowpan_nhc_insert(nhc);
- if (ret < 0)
- goto out;
-
lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
out:
spin_unlock_bh(&lowpan_nhc_lock);
@@ -220,11 +156,10 @@ out:
}
EXPORT_SYMBOL(lowpan_nhc_add);
-void lowpan_nhc_del(struct lowpan_nhc *nhc)
+void lowpan_nhc_del(const struct lowpan_nhc *nhc)
{
spin_lock_bh(&lowpan_nhc_lock);
- lowpan_nhc_remove(nhc);
lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;
spin_unlock_bh(&lowpan_nhc_lock);
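The rewritten lowpan_nhc_by_nhcid() above drops the rbtree walk and instead tests the first NHC octet of the skb against each registered entry with (id & nhc->idmask) == nhc->id. A stand-alone user-space sketch of that test, using the RFC 6282 UDP NHC values (0xF0/0xF8) that appear later in this diff, shows which first octets a single entry claims; it is an illustration only, not kernel code.

/* Illustration of the one-byte id/idmask match now used by
 * lowpan_nhc_by_nhcid(); builds with any C compiler.
 */
#include <stdint.h>
#include <stdio.h>

#define LOWPAN_NHC_UDP_ID   0xF0	/* 11110CPP per RFC 6282 */
#define LOWPAN_NHC_UDP_MASK 0xF8

static int udp_nhc_matches(uint8_t first_octet)
{
	return (first_octet & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID;
}

int main(void)
{
	/* 0xF0..0xF7 match (checksum/port compression bits vary); 0xF8 does not */
	for (unsigned int b = 0xF0; b <= 0xF8; b++)
		printf("0x%02X -> %d\n", b, udp_nhc_matches((uint8_t)b));
	return 0;
}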
diff --git a/net/6lowpan/nhc.h b/net/6lowpan/nhc.h
index 67951c40734b..ab7b4977c32b 100644
--- a/net/6lowpan/nhc.h
+++ b/net/6lowpan/nhc.h
@@ -16,24 +16,20 @@
* @_name: const char * of common header compression name.
* @_nexthdr: ipv6 nexthdr field for the header compression.
* @_nexthdrlen: ipv6 nexthdr len for the reserved space.
- * @_idsetup: callback to setup id and mask values.
- * @_idlen: len for the next header id and mask, should be always the same.
+ * @_id: one byte nhc id value.
+ * @_idmask: one byte nhc id mask value.
* @_uncompress: callback for uncompression call.
* @_compress: callback for compression call.
*/
#define LOWPAN_NHC(__nhc, _name, _nexthdr, \
- _hdrlen, _idsetup, _idlen, \
+ _hdrlen, _id, _idmask, \
_uncompress, _compress) \
-static u8 __nhc##_val[_idlen]; \
-static u8 __nhc##_mask[_idlen]; \
-static struct lowpan_nhc __nhc = { \
+static const struct lowpan_nhc __nhc = { \
.name = _name, \
.nexthdr = _nexthdr, \
.nexthdrlen = _hdrlen, \
- .id = __nhc##_val, \
- .idmask = __nhc##_mask, \
- .idlen = _idlen, \
- .idsetup = _idsetup, \
+ .id = _id, \
+ .idmask = _idmask, \
.uncompress = _uncompress, \
.compress = _compress, \
}
@@ -53,27 +49,21 @@ module_exit(__nhc##_exit);
/**
* struct lowpan_nhc - hold 6lowpan next hdr compression ifnformation
*
- * @node: holder for the rbtree.
* @name: name of the specific next header compression
* @nexthdr: next header value of the protocol which should be compressed.
* @nexthdrlen: ipv6 nexthdr len for the reserved space.
- * @id: array for nhc id. Note this need to be in network byteorder.
- * @mask: array for nhc id mask. Note this need to be in network byteorder.
- * @len: the length of the next header id and mask.
- * @setup: callback to setup fill the next header id value and mask.
+ * @id: one byte nhc id value.
+ * @idmask: one byte nhc id mask value.
* @compress: callback to do the header compression.
* @uncompress: callback to do the header uncompression.
*/
struct lowpan_nhc {
- struct rb_node node;
const char *name;
- const u8 nexthdr;
- const size_t nexthdrlen;
- u8 *id;
- u8 *idmask;
- const size_t idlen;
+ u8 nexthdr;
+ size_t nexthdrlen;
+ u8 id;
+ u8 idmask;
- void (*idsetup)(struct lowpan_nhc *nhc);
int (*uncompress)(struct sk_buff *skb, size_t needed);
int (*compress)(struct sk_buff *skb, u8 **hc_ptr);
};
@@ -126,14 +116,14 @@ int lowpan_nhc_do_uncompression(struct sk_buff *skb,
*
* @nhc: nhc which should be add.
*/
-int lowpan_nhc_add(struct lowpan_nhc *nhc);
+int lowpan_nhc_add(const struct lowpan_nhc *nhc);
/**
* lowpan_nhc_del - delete a next header compression from framework
*
* @nhc: nhc which should be delete.
*/
-void lowpan_nhc_del(struct lowpan_nhc *nhc);
+void lowpan_nhc_del(const struct lowpan_nhc *nhc);
/**
* lowpan_nhc_init - adding all default nhcs
diff --git a/net/6lowpan/nhc_dest.c b/net/6lowpan/nhc_dest.c
index 4768a9459212..0cbcc7806469 100644
--- a/net/6lowpan/nhc_dest.c
+++ b/net/6lowpan/nhc_dest.c
@@ -6,18 +6,11 @@
#include "nhc.h"
-#define LOWPAN_NHC_DEST_IDLEN 1
#define LOWPAN_NHC_DEST_ID_0 0xe6
#define LOWPAN_NHC_DEST_MASK_0 0xfe
-static void dest_nhid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_NHC_DEST_ID_0;
- nhc->idmask[0] = LOWPAN_NHC_DEST_MASK_0;
-}
-
LOWPAN_NHC(nhc_dest, "RFC6282 Destination Options", NEXTHDR_DEST, 0,
- dest_nhid_setup, LOWPAN_NHC_DEST_IDLEN, NULL, NULL);
+ LOWPAN_NHC_DEST_ID_0, LOWPAN_NHC_DEST_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_dest);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Destination Options compression");
diff --git a/net/6lowpan/nhc_fragment.c b/net/6lowpan/nhc_fragment.c
index be85f07715bd..9414552df0ac 100644
--- a/net/6lowpan/nhc_fragment.c
+++ b/net/6lowpan/nhc_fragment.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_NHC_FRAGMENT_IDLEN 1
#define LOWPAN_NHC_FRAGMENT_ID_0 0xe4
#define LOWPAN_NHC_FRAGMENT_MASK_0 0xfe
-static void fragment_nhid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_NHC_FRAGMENT_ID_0;
- nhc->idmask[0] = LOWPAN_NHC_FRAGMENT_MASK_0;
-}
-
LOWPAN_NHC(nhc_fragment, "RFC6282 Fragment", NEXTHDR_FRAGMENT, 0,
- fragment_nhid_setup, LOWPAN_NHC_FRAGMENT_IDLEN, NULL, NULL);
+ LOWPAN_NHC_FRAGMENT_ID_0, LOWPAN_NHC_FRAGMENT_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_fragment);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Fragment compression");
diff --git a/net/6lowpan/nhc_ghc_ext_dest.c b/net/6lowpan/nhc_ghc_ext_dest.c
index a9137f1733be..e4745ddd10a8 100644
--- a/net/6lowpan/nhc_ghc_ext_dest.c
+++ b/net/6lowpan/nhc_ghc_ext_dest.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_GHC_EXT_DEST_IDLEN 1
#define LOWPAN_GHC_EXT_DEST_ID_0 0xb6
#define LOWPAN_GHC_EXT_DEST_MASK_0 0xfe
-static void dest_ghid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_GHC_EXT_DEST_ID_0;
- nhc->idmask[0] = LOWPAN_GHC_EXT_DEST_MASK_0;
-}
-
LOWPAN_NHC(ghc_ext_dest, "RFC7400 Destination Extension Header", NEXTHDR_DEST,
- 0, dest_ghid_setup, LOWPAN_GHC_EXT_DEST_IDLEN, NULL, NULL);
+ 0, LOWPAN_GHC_EXT_DEST_ID_0, LOWPAN_GHC_EXT_DEST_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_dest);
MODULE_DESCRIPTION("6LoWPAN generic header destination extension compression");
diff --git a/net/6lowpan/nhc_ghc_ext_frag.c b/net/6lowpan/nhc_ghc_ext_frag.c
index d49b745918e0..220e5abfa946 100644
--- a/net/6lowpan/nhc_ghc_ext_frag.c
+++ b/net/6lowpan/nhc_ghc_ext_frag.c
@@ -5,19 +5,12 @@
#include "nhc.h"
-#define LOWPAN_GHC_EXT_FRAG_IDLEN 1
#define LOWPAN_GHC_EXT_FRAG_ID_0 0xb4
#define LOWPAN_GHC_EXT_FRAG_MASK_0 0xfe
-static void frag_ghid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_GHC_EXT_FRAG_ID_0;
- nhc->idmask[0] = LOWPAN_GHC_EXT_FRAG_MASK_0;
-}
-
LOWPAN_NHC(ghc_ext_frag, "RFC7400 Fragmentation Extension Header",
- NEXTHDR_FRAGMENT, 0, frag_ghid_setup,
- LOWPAN_GHC_EXT_FRAG_IDLEN, NULL, NULL);
+ NEXTHDR_FRAGMENT, 0, LOWPAN_GHC_EXT_FRAG_ID_0,
+ LOWPAN_GHC_EXT_FRAG_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_frag);
MODULE_DESCRIPTION("6LoWPAN generic header fragmentation extension compression");
diff --git a/net/6lowpan/nhc_ghc_ext_hop.c b/net/6lowpan/nhc_ghc_ext_hop.c
index 3beedf5140a3..9b0de4da7379 100644
--- a/net/6lowpan/nhc_ghc_ext_hop.c
+++ b/net/6lowpan/nhc_ghc_ext_hop.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_GHC_EXT_HOP_IDLEN 1
#define LOWPAN_GHC_EXT_HOP_ID_0 0xb0
#define LOWPAN_GHC_EXT_HOP_MASK_0 0xfe
-static void hop_ghid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_GHC_EXT_HOP_ID_0;
- nhc->idmask[0] = LOWPAN_GHC_EXT_HOP_MASK_0;
-}
-
LOWPAN_NHC(ghc_ext_hop, "RFC7400 Hop-by-Hop Extension Header", NEXTHDR_HOP, 0,
- hop_ghid_setup, LOWPAN_GHC_EXT_HOP_IDLEN, NULL, NULL);
+ LOWPAN_GHC_EXT_HOP_ID_0, LOWPAN_GHC_EXT_HOP_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_hop);
MODULE_DESCRIPTION("6LoWPAN generic header hop-by-hop extension compression");
diff --git a/net/6lowpan/nhc_ghc_ext_route.c b/net/6lowpan/nhc_ghc_ext_route.c
index 70dc0ea3cf66..3e86faec59c9 100644
--- a/net/6lowpan/nhc_ghc_ext_route.c
+++ b/net/6lowpan/nhc_ghc_ext_route.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_GHC_EXT_ROUTE_IDLEN 1
#define LOWPAN_GHC_EXT_ROUTE_ID_0 0xb2
#define LOWPAN_GHC_EXT_ROUTE_MASK_0 0xfe
-static void route_ghid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_GHC_EXT_ROUTE_ID_0;
- nhc->idmask[0] = LOWPAN_GHC_EXT_ROUTE_MASK_0;
-}
-
LOWPAN_NHC(ghc_ext_route, "RFC7400 Routing Extension Header", NEXTHDR_ROUTING,
- 0, route_ghid_setup, LOWPAN_GHC_EXT_ROUTE_IDLEN, NULL, NULL);
+ 0, LOWPAN_GHC_EXT_ROUTE_ID_0, LOWPAN_GHC_EXT_ROUTE_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_route);
MODULE_DESCRIPTION("6LoWPAN generic header routing extension compression");
diff --git a/net/6lowpan/nhc_ghc_icmpv6.c b/net/6lowpan/nhc_ghc_icmpv6.c
index 339ceffc25a9..1634f3eb0be8 100644
--- a/net/6lowpan/nhc_ghc_icmpv6.c
+++ b/net/6lowpan/nhc_ghc_icmpv6.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_GHC_ICMPV6_IDLEN 1
#define LOWPAN_GHC_ICMPV6_ID_0 0xdf
#define LOWPAN_GHC_ICMPV6_MASK_0 0xff
-static void icmpv6_ghid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_GHC_ICMPV6_ID_0;
- nhc->idmask[0] = LOWPAN_GHC_ICMPV6_MASK_0;
-}
-
LOWPAN_NHC(ghc_icmpv6, "RFC7400 ICMPv6", NEXTHDR_ICMP, 0,
- icmpv6_ghid_setup, LOWPAN_GHC_ICMPV6_IDLEN, NULL, NULL);
+ LOWPAN_GHC_ICMPV6_ID_0, LOWPAN_GHC_ICMPV6_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_icmpv6);
MODULE_DESCRIPTION("6LoWPAN generic header ICMPv6 compression");
diff --git a/net/6lowpan/nhc_ghc_udp.c b/net/6lowpan/nhc_ghc_udp.c
index f47fec601e73..4ac4813b77ad 100644
--- a/net/6lowpan/nhc_ghc_udp.c
+++ b/net/6lowpan/nhc_ghc_udp.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_GHC_UDP_IDLEN 1
#define LOWPAN_GHC_UDP_ID_0 0xd0
#define LOWPAN_GHC_UDP_MASK_0 0xf8
-static void udp_ghid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_GHC_UDP_ID_0;
- nhc->idmask[0] = LOWPAN_GHC_UDP_MASK_0;
-}
-
LOWPAN_NHC(ghc_udp, "RFC7400 UDP", NEXTHDR_UDP, 0,
- udp_ghid_setup, LOWPAN_GHC_UDP_IDLEN, NULL, NULL);
+ LOWPAN_GHC_UDP_ID_0, LOWPAN_GHC_UDP_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_udp);
MODULE_DESCRIPTION("6LoWPAN generic header UDP compression");
diff --git a/net/6lowpan/nhc_hop.c b/net/6lowpan/nhc_hop.c
index 158fc1906327..182087dfd09d 100644
--- a/net/6lowpan/nhc_hop.c
+++ b/net/6lowpan/nhc_hop.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_NHC_HOP_IDLEN 1
#define LOWPAN_NHC_HOP_ID_0 0xe0
#define LOWPAN_NHC_HOP_MASK_0 0xfe
-static void hop_nhid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_NHC_HOP_ID_0;
- nhc->idmask[0] = LOWPAN_NHC_HOP_MASK_0;
-}
-
LOWPAN_NHC(nhc_hop, "RFC6282 Hop-by-Hop Options", NEXTHDR_HOP, 0,
- hop_nhid_setup, LOWPAN_NHC_HOP_IDLEN, NULL, NULL);
+ LOWPAN_NHC_HOP_ID_0, LOWPAN_NHC_HOP_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_hop);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Hop-by-Hop Options compression");
diff --git a/net/6lowpan/nhc_ipv6.c b/net/6lowpan/nhc_ipv6.c
index 08b7589e5b38..20242360b1d4 100644
--- a/net/6lowpan/nhc_ipv6.c
+++ b/net/6lowpan/nhc_ipv6.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_NHC_IPV6_IDLEN 1
#define LOWPAN_NHC_IPV6_ID_0 0xee
#define LOWPAN_NHC_IPV6_MASK_0 0xfe
-static void ipv6_nhid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_NHC_IPV6_ID_0;
- nhc->idmask[0] = LOWPAN_NHC_IPV6_MASK_0;
-}
-
-LOWPAN_NHC(nhc_ipv6, "RFC6282 IPv6", NEXTHDR_IPV6, 0, ipv6_nhid_setup,
- LOWPAN_NHC_IPV6_IDLEN, NULL, NULL);
+LOWPAN_NHC(nhc_ipv6, "RFC6282 IPv6", NEXTHDR_IPV6, 0, LOWPAN_NHC_IPV6_ID_0,
+ LOWPAN_NHC_IPV6_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_ipv6);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 IPv6 compression");
diff --git a/net/6lowpan/nhc_mobility.c b/net/6lowpan/nhc_mobility.c
index ac8fca689828..1c31d872c804 100644
--- a/net/6lowpan/nhc_mobility.c
+++ b/net/6lowpan/nhc_mobility.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_NHC_MOBILITY_IDLEN 1
#define LOWPAN_NHC_MOBILITY_ID_0 0xe8
#define LOWPAN_NHC_MOBILITY_MASK_0 0xfe
-static void mobility_nhid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_NHC_MOBILITY_ID_0;
- nhc->idmask[0] = LOWPAN_NHC_MOBILITY_MASK_0;
-}
-
LOWPAN_NHC(nhc_mobility, "RFC6282 Mobility", NEXTHDR_MOBILITY, 0,
- mobility_nhid_setup, LOWPAN_NHC_MOBILITY_IDLEN, NULL, NULL);
+ LOWPAN_NHC_MOBILITY_ID_0, LOWPAN_NHC_MOBILITY_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_mobility);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Mobility compression");
diff --git a/net/6lowpan/nhc_routing.c b/net/6lowpan/nhc_routing.c
index 1c174023de42..dae03ebf7021 100644
--- a/net/6lowpan/nhc_routing.c
+++ b/net/6lowpan/nhc_routing.c
@@ -5,18 +5,11 @@
#include "nhc.h"
-#define LOWPAN_NHC_ROUTING_IDLEN 1
#define LOWPAN_NHC_ROUTING_ID_0 0xe2
#define LOWPAN_NHC_ROUTING_MASK_0 0xfe
-static void routing_nhid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_NHC_ROUTING_ID_0;
- nhc->idmask[0] = LOWPAN_NHC_ROUTING_MASK_0;
-}
-
LOWPAN_NHC(nhc_routing, "RFC6282 Routing", NEXTHDR_ROUTING, 0,
- routing_nhid_setup, LOWPAN_NHC_ROUTING_IDLEN, NULL, NULL);
+ LOWPAN_NHC_ROUTING_ID_0, LOWPAN_NHC_ROUTING_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_routing);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Routing compression");
diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c
index 33f17bd8cda7..0a506c77283d 100644
--- a/net/6lowpan/nhc_udp.c
+++ b/net/6lowpan/nhc_udp.c
@@ -14,7 +14,6 @@
#define LOWPAN_NHC_UDP_MASK 0xF8
#define LOWPAN_NHC_UDP_ID 0xF0
-#define LOWPAN_NHC_UDP_IDLEN 1
#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
@@ -169,14 +168,8 @@ static int udp_compress(struct sk_buff *skb, u8 **hc_ptr)
return 0;
}
-static void udp_nhid_setup(struct lowpan_nhc *nhc)
-{
- nhc->id[0] = LOWPAN_NHC_UDP_ID;
- nhc->idmask[0] = LOWPAN_NHC_UDP_MASK;
-}
-
LOWPAN_NHC(nhc_udp, "RFC6282 UDP", NEXTHDR_UDP, sizeof(struct udphdr),
- udp_nhid_setup, LOWPAN_NHC_UDP_IDLEN, udp_uncompress, udp_compress);
+ LOWPAN_NHC_UDP_ID, LOWPAN_NHC_UDP_MASK, udp_uncompress, udp_compress);
module_lowpan_nhc(nhc_udp);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 UDP compression");
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index acf8c791f320..5aa8144101dc 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -63,10 +63,10 @@ bool vlan_do_receive(struct sk_buff **skbp)
rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->rx_packets++;
- rx_stats->rx_bytes += skb->len;
+ u64_stats_inc(&rx_stats->rx_packets);
+ u64_stats_add(&rx_stats->rx_bytes, skb->len);
if (skb->pkt_type == PACKET_MULTICAST)
- rx_stats->rx_multicast++;
+ u64_stats_inc(&rx_stats->rx_multicast);
u64_stats_update_end(&rx_stats->syncp);
return true;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 839f2020b015..035812b0461c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -128,8 +128,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += len;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, len);
u64_stats_update_end(&stats->syncp);
} else {
this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
@@ -615,7 +615,7 @@ static int vlan_dev_init(struct net_device *dev)
return -ENOMEM;
/* Get vlan's reference to real_dev */
- dev_hold_track(real_dev, &vlan->dev_tracker, GFP_KERNEL);
+ netdev_hold(real_dev, &vlan->dev_tracker, GFP_KERNEL);
return 0;
}
@@ -713,11 +713,11 @@ static void vlan_dev_get_stats64(struct net_device *dev,
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
- rxpackets = p->rx_packets;
- rxbytes = p->rx_bytes;
- rxmulticast = p->rx_multicast;
- txpackets = p->tx_packets;
- txbytes = p->tx_bytes;
+ rxpackets = u64_stats_read(&p->rx_packets);
+ rxbytes = u64_stats_read(&p->rx_bytes);
+ rxmulticast = u64_stats_read(&p->rx_multicast);
+ txpackets = u64_stats_read(&p->tx_packets);
+ txbytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rxpackets;
@@ -726,8 +726,8 @@ static void vlan_dev_get_stats64(struct net_device *dev,
stats->tx_packets += txpackets;
stats->tx_bytes += txbytes;
/* rx_errors & tx_dropped are u32 */
- rx_errors += p->rx_errors;
- tx_dropped += p->tx_dropped;
+ rx_errors += READ_ONCE(p->rx_errors);
+ tx_dropped += READ_ONCE(p->tx_dropped);
}
stats->rx_errors = rx_errors;
stats->tx_dropped = tx_dropped;
@@ -852,7 +852,7 @@ static void vlan_dev_free(struct net_device *dev)
vlan->vlan_pcpu_stats = NULL;
/* Get rid of the vlan's reference to real_dev */
- dev_put_track(vlan->real_dev, &vlan->dev_tracker);
+ netdev_put(vlan->real_dev, &vlan->dev_tracker);
}
void vlan_setup(struct net_device *dev)
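The vlan hunks above (and the bridge and devlink hunks later in this diff) convert raw u64 counters to u64_stats_t plus the u64_stats_read()/u64_stats_add()/u64_stats_inc() accessors, so 32-bit hosts get tear-free 64-bit values without atomics. A minimal kernel-style sketch of the writer/reader pattern, using only the u64_stats API that appears in these hunks; the demo_pcpu_stats structure is made up for illustration.

/* Sketch of the u64_stats_t per-CPU counter pattern. */
struct demo_pcpu_stats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	struct u64_stats_sync	syncp;
};

static void demo_rx_account(struct demo_pcpu_stats __percpu *pcpu,
			    unsigned int len)
{
	struct demo_pcpu_stats *stats = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);
}

static void demo_rx_fold(struct demo_pcpu_stats __percpu *pcpu,
			 u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct demo_pcpu_stats *stats = per_cpu_ptr(pcpu, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			p = u64_stats_read(&stats->rx_packets);
			b = u64_stats_read(&stats->rx_bytes);
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		*packets += p;
		*bytes += b;
	}
}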
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 4c7030ed8d33..bbac3cb4dc99 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -102,7 +102,8 @@ again:
ax25_disconnect(s, ENETUNREACH);
s->ax25_dev = NULL;
if (sk->sk_socket) {
- dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
+ netdev_put(ax25_dev->dev,
+ &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
ax25_cb_del(s);
@@ -1065,7 +1066,7 @@ static int ax25_release(struct socket *sock)
del_timer_sync(&ax25->t3timer);
del_timer_sync(&ax25->idletimer);
}
- dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
+ netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
@@ -1146,7 +1147,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (ax25_dev) {
ax25_fillin_cb(ax25, ax25_dev);
- dev_hold_track(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
+ netdev_hold(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
}
done:
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 95a76d571c44..c5462486dbca 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -52,7 +52,8 @@ void ax25_dev_device_up(struct net_device *dev)
{
ax25_dev *ax25_dev;
- if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
+ ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_KERNEL);
+ if (!ax25_dev) {
printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
return;
}
@@ -60,7 +61,7 @@ void ax25_dev_device_up(struct net_device *dev)
refcount_set(&ax25_dev->refcount, 1);
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
- dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
ax25_dev->forward = NULL;
ax25_dev->device_up = true;
@@ -136,7 +137,7 @@ unlock_put:
spin_unlock_bh(&ax25_dev_lock);
ax25_dev_put(ax25_dev);
dev->ax25_ptr = NULL;
- dev_put_track(dev, &ax25_dev->dev_tracker);
+ netdev_put(dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
@@ -205,7 +206,7 @@ void __exit ax25_dev_free(void)
ax25_dev = ax25_dev_list;
while (ax25_dev != NULL) {
s = ax25_dev;
- dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
+ netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev = ax25_dev->next;
kfree(s);
}
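The ax25, vlan, bridge and core/dev hunks all make the same mechanical change: dev_hold_track()/dev_put_track() become netdev_hold()/netdev_put(), keeping the netdevice_tracker argument that ties each reference to its owner for reference-leak debugging. A minimal sketch of the pairing; the my_priv owner structure is made up for illustration.

/* Sketch of holding and releasing a tracked netdev reference. */
struct my_priv {
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;
};

static void my_priv_attach(struct my_priv *p, struct net_device *dev)
{
	p->dev = dev;
	netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
}

static void my_priv_detach(struct my_priv *p)
{
	netdev_put(p->dev, &p->dev_tracker);
	p->dev = NULL;
}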
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 56f059b3c242..2ca96acbc50a 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1420,9 +1420,6 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
void *data;
int ret;
- if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
- return -EINVAL;
-
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
return -EINVAL;
@@ -1487,9 +1484,6 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
u32 retval, duration;
int ret = -EINVAL;
- if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
- return -EINVAL;
-
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
return -EINVAL;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 47fcbade7389..a84a7cfb9d6d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -274,7 +274,7 @@ static void destroy_nbp(struct net_bridge_port *p)
p->br = NULL;
p->dev = NULL;
- dev_put_track(dev, &p->dev_tracker);
+ netdev_put(dev, &p->dev_tracker);
kobject_put(&p->kobj);
}
@@ -423,7 +423,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
return ERR_PTR(-ENOMEM);
p->br = br;
- dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
p->dev = dev;
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
@@ -434,7 +434,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
br_stp_port_timer_init(p);
err = br_multicast_add_port(p);
if (err) {
- dev_put_track(dev, &p->dev_tracker);
+ netdev_put(dev, &p->dev_tracker);
kfree(p);
p = ERR_PTR(err);
}
@@ -615,7 +615,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
err = dev_set_allmulti(dev, 1);
if (err) {
br_multicast_del_port(p);
- dev_put_track(dev, &p->dev_tracker);
+ netdev_put(dev, &p->dev_tracker);
kfree(p); /* kobject not yet init'd, manually free */
goto err1;
}
@@ -725,7 +725,7 @@ err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
br_multicast_del_port(p);
- dev_put_track(dev, &p->dev_tracker);
+ netdev_put(dev, &p->dev_tracker);
kobject_put(&p->kobj);
dev_set_allmulti(dev, -1);
err1:
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index fdcc641fc89a..589ff497d50c 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -1025,8 +1025,8 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
return -EINVAL;
}
- if (p->state == BR_STATE_DISABLED) {
- NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
+ if (p->state == BR_STATE_DISABLED && entry->state != MDB_PERMANENT) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
return -EINVAL;
}
vg = nbp_vlan_group(p);
@@ -1086,9 +1086,6 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
continue;
- if (p->key.port->state == BR_STATE_DISABLED)
- goto unlock;
-
br_multicast_del_pg(mp, p, pp);
err = 0;
break;
@@ -1124,8 +1121,14 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -ENODEV;
p = br_port_get_rtnl(pdev);
- if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ if (!p) {
+ NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
+ return -EINVAL;
+ }
+ if (p->br != br) {
+ NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
return -EINVAL;
+ }
vg = nbp_vlan_group(p);
} else {
vg = br_vlan_group(br);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index bb01776d2d88..1ef14a099c6b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1770,10 +1770,10 @@ static int br_fill_linkxstats(struct sk_buff *skb,
if (v->vid == pvid)
vxi.flags |= BRIDGE_VLAN_INFO_PVID;
br_vlan_get_stats(v, &stats);
- vxi.rx_bytes = stats.rx_bytes;
- vxi.rx_packets = stats.rx_packets;
- vxi.tx_bytes = stats.tx_bytes;
- vxi.tx_packets = stats.tx_packets;
+ vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
+ vxi.rx_packets = u64_stats_read(&stats.rx_packets);
+ vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
+ vxi.tx_packets = u64_stats_read(&stats.tx_packets);
if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
goto nla_put_failure;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 0f5e75ccac79..6e53dc991409 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -505,8 +505,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
+ u64_stats_add(&stats->tx_bytes, skb->len);
+ u64_stats_inc(&stats->tx_packets);
u64_stats_update_end(&stats->syncp);
}
@@ -624,8 +624,8 @@ static bool __allowed_ingress(const struct net_bridge *br,
if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_bytes += skb->len;
- stats->rx_packets++;
+ u64_stats_add(&stats->rx_bytes, skb->len);
+ u64_stats_inc(&stats->rx_packets);
u64_stats_update_end(&stats->syncp);
}
@@ -1379,16 +1379,16 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
cpu_stats = per_cpu_ptr(v->stats, i);
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- rxpackets = cpu_stats->rx_packets;
- rxbytes = cpu_stats->rx_bytes;
- txbytes = cpu_stats->tx_bytes;
- txpackets = cpu_stats->tx_packets;
+ rxpackets = u64_stats_read(&cpu_stats->rx_packets);
+ rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
+ txbytes = u64_stats_read(&cpu_stats->tx_bytes);
+ txpackets = u64_stats_read(&cpu_stats->tx_packets);
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
- stats->rx_packets += rxpackets;
- stats->rx_bytes += rxbytes;
- stats->tx_bytes += txbytes;
- stats->tx_packets += txpackets;
+ u64_stats_add(&stats->rx_packets, rxpackets);
+ u64_stats_add(&stats->rx_bytes, rxbytes);
+ u64_stats_add(&stats->tx_bytes, txbytes);
+ u64_stats_add(&stats->tx_packets, txpackets);
}
}
@@ -1779,14 +1779,18 @@ static bool br_vlan_stats_fill(struct sk_buff *skb,
return false;
br_vlan_get_stats(v, &stats);
- if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
+ u64_stats_read(&stats.rx_bytes),
BRIDGE_VLANDB_STATS_PAD) ||
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
- stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
- nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
+ u64_stats_read(&stats.rx_packets),
+ BRIDGE_VLANDB_STATS_PAD) ||
+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
+ u64_stats_read(&stats.tx_bytes),
BRIDGE_VLANDB_STATS_PAD) ||
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
- stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
+ u64_stats_read(&stats.tx_packets),
+ BRIDGE_VLANDB_STATS_PAD))
goto out_err;
nla_nest_end(skb, nest);
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 8c3eaba87ad2..c3ecd77e25cb 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -53,7 +53,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
goto err;
br_vlan_get_proto(br_dev, &p_proto);
- nft_reg_store16(dest, htons(p_proto));
+ nft_reg_store_be16(dest, htons(p_proto));
return;
}
default:
diff --git a/net/can/Kconfig b/net/can/Kconfig
index a9ac5ffab286..cb56be8e3862 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -15,7 +15,8 @@ menuconfig CAN
PF_CAN is contained in <Documentation/networking/can.rst>.
If you want CAN support you should say Y here and also to the
- specific driver for your controller(s) below.
+ specific driver for your controller(s) under the Network device
+ support section.
if CAN
@@ -69,6 +70,4 @@ config CAN_ISOTP
If you want to perform automotive vehicle diagnostic services (UDS),
say 'y'.
-source "drivers/net/can/Kconfig"
-
endif
diff --git a/net/compat.c b/net/compat.c
index 210fc3b4d0d8..6cd2e7683dd0 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -80,6 +80,7 @@ int __get_compat_msghdr(struct msghdr *kmsg,
return -EMSGSIZE;
kmsg->msg_iocb = NULL;
+ kmsg->msg_ubuf = NULL;
*ptr = msg.msg_iov;
*len = msg.msg_iovlen;
return 0;
diff --git a/net/core/.gitignore b/net/core/.gitignore
new file mode 100644
index 000000000000..df1e74372cce
--- /dev/null
+++ b/net/core/.gitignore
@@ -0,0 +1 @@
+dropreason_str.c
diff --git a/net/core/Makefile b/net/core/Makefile
index a8e4f737692b..e8ce3bd283a6 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -4,7 +4,8 @@
#
obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \
- gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
+ gen_stats.o gen_estimator.o net_namespace.o secure_seq.o \
+ flow_dissector.o dropreason_str.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
@@ -39,3 +40,23 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
+
+clean-files := dropreason_str.c
+
+quiet_cmd_dropreason_str = GEN $@
+cmd_dropreason_str = awk -F ',' 'BEGIN{ print "\#include <net/dropreason.h>\n"; \
+ print "const char * const drop_reasons[] = {" }\
+ /^enum skb_drop/ { dr=1; }\
+ /^\};/ { dr=0; }\
+ /^\tSKB_DROP_REASON_/ {\
+ if (dr) {\
+ sub(/\tSKB_DROP_REASON_/, "", $$1);\
+ printf "\t[SKB_DROP_REASON_%s] = \"%s\",\n", $$1, $$1;\
+ }\
+ }\
+ END{ print "};" }' $< > $@
+
+$(obj)/dropreason_str.c: $(srctree)/include/net/dropreason.h
+ $(call cmd,dropreason_str)
+
+$(obj)/dropreason_str.o: $(obj)/dropreason_str.c
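The Makefile rule above runs awk over include/net/dropreason.h at build time and emits net/core/dropreason_str.c, which holds the drop_reasons[] string table (hence the new .gitignore entry). Assuming the first few SKB_DROP_REASON_* enumerators in dropreason.h, the generated file should look roughly like the abridged sketch below.

/* Approximate shape of the generated net/core/dropreason_str.c. */
#include <net/dropreason.h>

const char * const drop_reasons[] = {
	[SKB_DROP_REASON_NOT_SPECIFIED] = "NOT_SPECIFIED",
	[SKB_DROP_REASON_NO_SOCKET] = "NO_SOCKET",
	[SKB_DROP_REASON_PKT_TOO_SMALL] = "PKT_TOO_SMALL",
	/* ... one entry per enumerator in dropreason.h ... */
};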
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 50f4faeea76c..8c702904d960 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -320,7 +320,6 @@ EXPORT_SYMBOL(skb_recv_datagram);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
consume_skb(skb);
- sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);
@@ -336,7 +335,6 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
slow = lock_sock_fast(sk);
sk_peek_offset_bwd(sk, len);
skb_orphan(skb);
- sk_mem_reclaim_partial(sk);
unlock_sock_fast(sk, slow);
/* skb is now orphaned, can be freed outside of locked section */
@@ -396,7 +394,6 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
NULL);
kfree_skb(skb);
- sk_mem_reclaim_partial(sk);
return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
@@ -613,10 +610,16 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
-int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *from, size_t length)
+int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, struct iov_iter *from,
+ size_t length)
{
- int frag = skb_shinfo(skb)->nr_frags;
+ int frag;
+
+ if (msg && msg->sg_from_iter)
+ return msg->sg_from_iter(sk, skb, from, length);
+
+ frag = skb_shinfo(skb)->nr_frags;
while (length && iov_iter_count(from)) {
struct page *pages[MAX_SKB_FRAGS];
@@ -702,7 +705,7 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
if (skb_copy_datagram_from_iter(skb, 0, from, copy))
return -EFAULT;
- return __zerocopy_sg_from_iter(NULL, skb, from, ~0U);
+ return __zerocopy_sg_from_iter(NULL, NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
diff --git a/net/core/dev.c b/net/core/dev.c
index 30a1603a7225..d588fd0a54ce 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3927,7 +3927,7 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_LOOPBACK;
if (skb->ip_summed == CHECKSUM_NONE)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- WARN_ON(!skb_dst(skb));
+ DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
skb_dst_force(skb);
netif_rx(skb);
return 0;
@@ -6357,6 +6357,23 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
}
EXPORT_SYMBOL(dev_set_threaded);
+/* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+ * This is to make sure bug fixed in 3226b158e67c
+ * ("net: avoid 32 x truesize under-estimation for tiny skbs")
+ * does not accidentally come back.
+ */
+static void napi_get_frags_check(struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+
+ local_bh_disable();
+ skb = napi_get_frags(napi);
+ WARN_ON_ONCE(skb && skb->head_frag);
+ napi_free_frags(napi);
+ local_bh_enable();
+}
+
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
@@ -6384,6 +6401,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
set_bit(NAPI_STATE_NPSVC, &napi->state);
list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi);
+ napi_get_frags_check(napi);
/* Create kthread for this napi if dev->threaded is set.
* Clear dev->threaded if kthread creation failed so that
* threaded mode will not be enabled in napi_enable().
@@ -7469,7 +7487,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
adj->ref_nr = 1;
adj->private = private;
adj->ignore = false;
- dev_hold_track(adj_dev, &adj->dev_tracker, GFP_KERNEL);
+ netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
@@ -7498,7 +7516,7 @@ remove_symlinks:
if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
- dev_put_track(adj_dev, &adj->dev_tracker);
+ netdev_put(adj_dev, &adj->dev_tracker);
kfree(adj);
return ret;
@@ -7540,7 +7558,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
list_del_rcu(&adj->list);
pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
adj_dev->name, dev->name, adj_dev->name);
- dev_put_track(adj_dev, &adj->dev_tracker);
+ netdev_put(adj_dev, &adj->dev_tracker);
kfree_rcu(adj, rcu);
}
@@ -10068,7 +10086,7 @@ int register_netdevice(struct net_device *dev)
dev_init_scheduler(dev);
- dev_hold_track(dev, &dev->dev_registered_tracker, GFP_KERNEL);
+ netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
list_netdevice(dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
@@ -10467,23 +10485,23 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
int cpu;
for_each_possible_cpu(cpu) {
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
const struct pcpu_sw_netstats *stats;
- struct pcpu_sw_netstats tmp;
unsigned int start;
stats = per_cpu_ptr(netstats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
- tmp.rx_packets = stats->rx_packets;
- tmp.rx_bytes = stats->rx_bytes;
- tmp.tx_packets = stats->tx_packets;
- tmp.tx_bytes = stats->tx_bytes;
+ rx_packets = u64_stats_read(&stats->rx_packets);
+ rx_bytes = u64_stats_read(&stats->rx_bytes);
+ tx_packets = u64_stats_read(&stats->tx_packets);
+ tx_bytes = u64_stats_read(&stats->tx_bytes);
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- s->rx_packets += tmp.rx_packets;
- s->rx_bytes += tmp.rx_bytes;
- s->tx_packets += tmp.tx_packets;
- s->tx_bytes += tmp.tx_bytes;
+ s->rx_packets += rx_packets;
+ s->rx_bytes += rx_bytes;
+ s->tx_packets += tx_packets;
+ s->tx_bytes += tx_bytes;
}
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
@@ -10877,7 +10895,7 @@ void unregister_netdevice_many(struct list_head *head)
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
- dev_put_track(dev, &dev->dev_registered_tracker);
+ netdev_put(dev, &dev->dev_registered_tracker);
net_set_todo(dev);
}
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 4f6be442ae7e..7674bb9f3076 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -384,10 +384,10 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
return -ENODEV;
if (!netif_is_bridge_master(dev))
return -EOPNOTSUPP;
- dev_hold_track(dev, &dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &dev_tracker, GFP_KERNEL);
rtnl_unlock();
err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL);
- dev_put_track(dev, &dev_tracker);
+ netdev_put(dev, &dev_tracker);
rtnl_lock();
return err;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 5cc88490f18f..98d79feeb3dc 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -66,6 +66,7 @@ struct devlink {
* port, sb, dpipe, resource, params, region, traps and more.
*/
struct mutex lock;
+ struct lock_class_key lock_key;
u8 reload_failed:1;
refcount_t refcount;
struct completion comp;
@@ -265,6 +266,12 @@ void devl_lock(struct devlink *devlink)
}
EXPORT_SYMBOL_GPL(devl_lock);
+int devl_trylock(struct devlink *devlink)
+{
+ return mutex_trylock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_trylock);
+
void devl_unlock(struct devlink *devlink)
{
mutex_unlock(&devlink->lock);
@@ -711,7 +718,7 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
return PTR_ERR(devlink);
}
if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
info->user_ptr[0] = devlink;
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
devlink_port = devlink_port_get_from_info(devlink, info);
@@ -754,7 +761,7 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
unlock:
if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
mutex_unlock(&devlink_mutex);
return err;
@@ -772,7 +779,7 @@ static void devlink_nl_post_doit(const struct genl_ops *ops,
devlink_linecard_put(linecard);
}
if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
mutex_unlock(&devlink_mutex);
}
@@ -1329,7 +1336,7 @@ static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
u32 id = NETLINK_CB(cb->skb).portid;
@@ -1342,13 +1349,13 @@ static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq,
NLM_F_MULTI, NULL);
if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -1495,7 +1502,7 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(devlink_port, &devlink->port_list, list) {
if (idx < start) {
idx++;
@@ -1507,13 +1514,13 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq,
NLM_F_MULTI, cb->extack);
if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -1700,9 +1707,9 @@ static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
return devlink->ops->port_unsplit(devlink, devlink_port, info->extack);
}
-static int devlink_port_new_notifiy(struct devlink *devlink,
- unsigned int port_index,
- struct genl_info *info)
+static int devlink_port_new_notify(struct devlink *devlink,
+ unsigned int port_index,
+ struct genl_info *info)
{
struct devlink_port *devlink_port;
struct sk_buff *msg;
@@ -1712,7 +1719,7 @@ static int devlink_port_new_notifiy(struct devlink *devlink,
if (!msg)
return -ENOMEM;
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
devlink_port = devlink_port_get_by_index(devlink, port_index);
if (!devlink_port) {
err = -ENODEV;
@@ -1724,12 +1731,9 @@ static int devlink_port_new_notifiy(struct devlink *devlink,
if (err)
goto out;
- err = genlmsg_reply(msg, info);
- mutex_unlock(&devlink->lock);
- return err;
+ return genlmsg_reply(msg, info);
out:
- mutex_unlock(&devlink->lock);
nlmsg_free(msg);
return err;
}
@@ -1777,7 +1781,7 @@ static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb,
if (err)
return err;
- err = devlink_port_new_notifiy(devlink, new_port_index, info);
+ err = devlink_port_new_notify(devlink, new_port_index, info);
if (err && err != -ENODEV) {
/* Fail to send the response; destroy newly created port. */
devlink->ops->port_del(devlink, new_port_index, extack);
@@ -2452,7 +2456,7 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
if (idx < start) {
idx++;
@@ -2464,13 +2468,13 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -2605,7 +2609,7 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
!devlink->ops->sb_pool_get)
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
devlink_sb,
@@ -2614,12 +2618,12 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -2826,7 +2830,7 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
!devlink->ops->sb_port_pool_get)
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_port_pool_get_dumpit(msg, start, &idx,
devlink, devlink_sb,
@@ -2835,12 +2839,12 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -3075,7 +3079,7 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
!devlink->ops->sb_tc_pool_bind_get)
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
devlink,
@@ -3085,12 +3089,12 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -5161,7 +5165,7 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(param_item, &devlink->param_list, list) {
if (idx < start) {
idx++;
@@ -5175,13 +5179,13 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -5396,7 +5400,7 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(devlink_port, &devlink->port_list, list) {
list_for_each_entry(param_item,
&devlink_port->param_list, list) {
@@ -5414,14 +5418,14 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -5673,7 +5677,7 @@ static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
unsigned long count;
void *p;
- lockdep_assert_held(&devlink->lock);
+ devl_assert_locked(devlink);
p = xa_load(&devlink->snapshot_ids, id);
if (WARN_ON(!p))
@@ -5709,7 +5713,7 @@ static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
unsigned long count;
void *p;
- lockdep_assert_held(&devlink->lock);
+ devl_assert_locked(devlink);
p = xa_load(&devlink->snapshot_ids, id);
if (WARN_ON(!p))
@@ -5748,7 +5752,7 @@ static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
*/
static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
{
- lockdep_assert_held(&devlink->lock);
+ devl_assert_locked(devlink);
if (xa_load(&devlink->snapshot_ids, id))
return -EEXIST;
@@ -5775,7 +5779,7 @@ static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
*/
static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
{
- lockdep_assert_held(&devlink->lock);
+ devl_assert_locked(devlink);
return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
xa_limit_32b, GFP_KERNEL);
@@ -5803,7 +5807,7 @@ __devlink_region_snapshot_create(struct devlink_region *region,
struct devlink_snapshot *snapshot;
int err;
- lockdep_assert_held(&devlink->lock);
+ devl_assert_locked(devlink);
/* check if region can hold one more snapshot */
if (region->cur_snapshots == region->max_snapshots)
@@ -5841,7 +5845,7 @@ static void devlink_region_snapshot_del(struct devlink_region *region,
{
struct devlink *devlink = region->devlink;
- lockdep_assert_held(&devlink->lock);
+ devl_assert_locked(devlink);
devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
region->cur_snapshots--;
@@ -5935,7 +5939,7 @@ static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
struct devlink_port *port;
int err = 0;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(region, &devlink->region_list, list) {
if (*idx < start) {
(*idx)++;
@@ -5959,7 +5963,7 @@ static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
}
out:
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
return err;
}
@@ -6249,7 +6253,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto out_dev;
}
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
!attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) {
@@ -6345,7 +6349,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
nla_nest_end(skb, chunks_attr);
genlmsg_end(skb, hdr);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
mutex_unlock(&devlink_mutex);
@@ -6354,7 +6358,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
nla_put_failure:
genlmsg_cancel(skb, hdr);
out_unlock:
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
out_dev:
mutex_unlock(&devlink_mutex);
@@ -6517,12 +6521,12 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
if (idx < start || !devlink->ops->info_get)
goto inc;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
cb->extack);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
if (err == -EOPNOTSUPP)
err = 0;
else if (err) {
@@ -7724,7 +7728,7 @@ retry_rep:
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry_port;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(port, &devlink->port_list, list) {
mutex_lock(&port->reporters_lock);
list_for_each_entry(reporter, &port->reporter_list, list) {
@@ -7739,7 +7743,7 @@ retry_rep:
cb->nlh->nlmsg_seq, NLM_F_MULTI);
if (err) {
mutex_unlock(&port->reporters_lock);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
@@ -7747,7 +7751,7 @@ retry_rep:
}
mutex_unlock(&port->reporters_lock);
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry_port:
devlink_put(devlink);
}
@@ -7946,8 +7950,8 @@ static int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
}
struct devlink_stats {
- u64 rx_bytes;
- u64 rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_packets;
struct u64_stats_sync syncp;
};
@@ -8104,12 +8108,12 @@ static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
cpu_stats = per_cpu_ptr(trap_stats, i);
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
+ rx_packets = u64_stats_read(&cpu_stats->rx_packets);
+ rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
+ u64_stats_add(&stats->rx_packets, rx_packets);
+ u64_stats_add(&stats->rx_bytes, rx_bytes);
}
}
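
The stats fields above are now u64_stats_t, so every access has to go through the u64_stats helpers instead of plain loads and stores. A minimal sketch of the writer/reader pairing on a hypothetical per-CPU counter; struct my_stats and both functions are illustrative, only the u64_stats_* calls mirror the patch:

	/* Hedged sketch of the u64_stats_t pattern; "my_stats" is hypothetical. */
	struct my_stats {
		u64_stats_t packets;
		struct u64_stats_sync syncp;
	};

	static void my_stats_inc(struct my_stats __percpu *stats)
	{
		struct my_stats *s = this_cpu_ptr(stats);

		u64_stats_update_begin(&s->syncp);
		u64_stats_inc(&s->packets);
		u64_stats_update_end(&s->syncp);
	}

	static u64 my_stats_read(struct my_stats __percpu *stats)
	{
		u64 total = 0;
		int i;

		for_each_possible_cpu(i) {
			struct my_stats *s = per_cpu_ptr(stats, i);
			unsigned int start;
			u64 packets;

			do {
				start = u64_stats_fetch_begin_irq(&s->syncp);
				packets = u64_stats_read(&s->packets);
			} while (u64_stats_fetch_retry_irq(&s->syncp, start));
			total += packets;
		}
		return total;
	}

On 64-bit kernels these helpers compile down to plain 64-bit accesses; on 32-bit they provide the seqcount protection the syncp member exists for.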
@@ -8127,11 +8131,13 @@ devlink_trap_group_stats_put(struct sk_buff *msg,
return -EMSGSIZE;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
- stats.rx_packets, DEVLINK_ATTR_PAD))
+ u64_stats_read(&stats.rx_packets),
+ DEVLINK_ATTR_PAD))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
- stats.rx_bytes, DEVLINK_ATTR_PAD))
+ u64_stats_read(&stats.rx_bytes),
+ DEVLINK_ATTR_PAD))
goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -8171,11 +8177,13 @@ static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
goto nla_put_failure;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
- stats.rx_packets, DEVLINK_ATTR_PAD))
+ u64_stats_read(&stats.rx_packets),
+ DEVLINK_ATTR_PAD))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
- stats.rx_bytes, DEVLINK_ATTR_PAD))
+ u64_stats_read(&stats.rx_bytes),
+ DEVLINK_ATTR_PAD))
goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -8290,7 +8298,7 @@ static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(trap_item, &devlink->trap_list, list) {
if (idx < start) {
idx++;
@@ -8302,13 +8310,13 @@ static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -8517,7 +8525,7 @@ static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(group_item, &devlink->trap_group_list,
list) {
if (idx < start) {
@@ -8530,13 +8538,13 @@ static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -8831,7 +8839,7 @@ static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
goto retry;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
list_for_each_entry(policer_item, &devlink->trap_policer_list,
list) {
if (idx < start) {
@@ -8844,13 +8852,13 @@ static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err) {
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
devlink_put(devlink);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
retry:
devlink_put(devlink);
}
@@ -9063,13 +9071,11 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_PORT_NEW,
.doit = devlink_nl_cmd_port_new_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_PORT_DEL,
.doit = devlink_nl_cmd_port_del_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_LINECARD_GET,
@@ -9473,7 +9479,9 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
INIT_LIST_HEAD(&devlink->trap_list);
INIT_LIST_HEAD(&devlink->trap_group_list);
INIT_LIST_HEAD(&devlink->trap_policer_list);
+ lockdep_register_key(&devlink->lock_key);
mutex_init(&devlink->lock);
+ lockdep_set_class(&devlink->lock, &devlink->lock_key);
mutex_init(&devlink->reporters_lock);
mutex_init(&devlink->linecards_lock);
refcount_set(&devlink->refcount, 1);
@@ -9620,6 +9628,7 @@ void devlink_free(struct devlink *devlink)
mutex_destroy(&devlink->linecards_lock);
mutex_destroy(&devlink->reporters_lock);
mutex_destroy(&devlink->lock);
+ lockdep_unregister_key(&devlink->lock_key);
WARN_ON(!list_empty(&devlink->trap_policer_list));
WARN_ON(!list_empty(&devlink->trap_group_list));
WARN_ON(!list_empty(&devlink->trap_list));
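
Registering a dedicated lock_class_key per devlink instance gives each instance's mutex its own lockdep class, so taking the lock of one instance while holding another's (for example a parent device and a nested instance) is not flagged as recursive locking. A sketch of the same pattern on a hypothetical object; struct foo and its helpers are assumptions, only the lockdep calls mirror the hunks above:

	/* Hedged sketch of a per-instance lock class; "struct foo" is hypothetical. */
	struct foo {
		struct mutex lock;
		struct lock_class_key lock_key;
	};

	static void foo_init(struct foo *foo)
	{
		lockdep_register_key(&foo->lock_key);
		mutex_init(&foo->lock);
		lockdep_set_class(&foo->lock, &foo->lock_key);
	}

	static void foo_destroy(struct foo *foo)
	{
		mutex_destroy(&foo->lock);
		lockdep_unregister_key(&foo->lock_key);
	}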
@@ -9673,11 +9682,24 @@ static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
cancel_delayed_work_sync(&devlink_port->type_warn_dw);
}
+/**
+ * devl_port_register() - Register devlink port
+ *
+ * @devlink: devlink
+ * @devlink_port: devlink port
+ * @port_index: driver-specific numerical identifier of the port
+ *
+ * Register devlink port with provided port index. User can use
+ * any indexing, even hw-related one. devlink_port structure
+ * is convenient to be embedded inside user driver private structure.
+ * Note that the caller should take care of zeroing the devlink_port
+ * structure.
+ */
int devl_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index)
{
- lockdep_assert_held(&devlink->lock);
+ devl_assert_locked(devlink);
if (devlink_port_index_exists(devlink, port_index))
return -EEXIST;
@@ -9711,6 +9733,8 @@ EXPORT_SYMBOL_GPL(devl_port_register);
* is convenient to be embedded inside user driver private structure.
* Note that the caller should take care of zeroing the devlink_port
* structure.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ * Context: Takes and releases devlink->lock <mutex>.
*/
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
@@ -9718,13 +9742,18 @@ int devlink_port_register(struct devlink *devlink,
{
int err;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
err = devl_port_register(devlink, devlink_port, port_index);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
return err;
}
EXPORT_SYMBOL_GPL(devlink_port_register);
+/**
+ * devl_port_unregister() - Unregister devlink port
+ *
+ * @devlink_port: devlink port
+ */
void devl_port_unregister(struct devlink_port *devlink_port)
{
lockdep_assert_held(&devlink_port->devlink->lock);
@@ -9742,14 +9771,16 @@ EXPORT_SYMBOL_GPL(devl_port_unregister);
* devlink_port_unregister - Unregister devlink port
*
* @devlink_port: devlink port
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ * Context: Takes and releases devlink->lock <mutex>.
*/
void devlink_port_unregister(struct devlink_port *devlink_port)
{
struct devlink *devlink = devlink_port->devlink;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
devl_port_unregister(devlink_port);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_port_unregister);
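
With devl_port_register() and devl_port_unregister() exported, a converted driver can keep all of its port setup inside one devl_lock() section instead of letting the wrappers retake devlink->lock per port. A hedged sketch; the driver function, the flat port array and the error unwind are hypothetical:

	/* Hedged sketch: batch port registration under a single lock section. */
	static int my_drv_ports_init(struct devlink *devlink,
				     struct devlink_port *ports,
				     unsigned int count)
	{
		unsigned int i;
		int err;

		devl_lock(devlink);
		for (i = 0; i < count; i++) {
			memset(&ports[i], 0, sizeof(ports[i]));	/* caller must zero */
			err = devl_port_register(devlink, &ports[i], i);
			if (err)
				goto err_unwind;
		}
		devl_unlock(devlink);
		return 0;

	err_unwind:
		while (i--)
			devl_port_unregister(&ports[i]);
		devl_unlock(devlink);
		return err;
	}

devlink_port_register()/devlink_port_unregister() remain for unconverted drivers and simply wrap the locked variants, as the hunks above show.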
@@ -10002,20 +10033,13 @@ int devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
}
EXPORT_SYMBOL_GPL(devl_rate_leaf_create);
-int
-devlink_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
-{
- struct devlink *devlink = devlink_port->devlink;
- int ret;
-
- mutex_lock(&devlink->lock);
- ret = devl_rate_leaf_create(devlink_port, priv);
- mutex_unlock(&devlink->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devlink_rate_leaf_create);
-
+/**
+ * devl_rate_leaf_destroy - destroy devlink rate leaf
+ *
+ * @devlink_port: devlink port linked to the rate object
+ *
+ * Destroy the devlink rate object of type leaf on provided @devlink_port.
+ */
void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
{
struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
@@ -10034,27 +10058,6 @@ void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
/**
- * devlink_rate_leaf_destroy - destroy devlink rate leaf
- *
- * @devlink_port: devlink port linked to the rate object
- *
- * Context: Takes and release devlink->lock <mutex>.
- */
-void devlink_rate_leaf_destroy(struct devlink_port *devlink_port)
-{
- struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
- struct devlink *devlink = devlink_port->devlink;
-
- if (!devlink_rate)
- return;
-
- mutex_lock(&devlink->lock);
- devl_rate_leaf_destroy(devlink_port);
- mutex_unlock(&devlink->lock);
-}
-EXPORT_SYMBOL_GPL(devlink_rate_leaf_destroy);
-
-/**
* devl_rate_nodes_destroy - destroy all devlink rate nodes on device
* @devlink: devlink instance
*
@@ -10092,24 +10095,6 @@ void devl_rate_nodes_destroy(struct devlink *devlink)
EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
/**
- * devlink_rate_nodes_destroy - destroy all devlink rate nodes on device
- *
- * @devlink: devlink instance
- *
- * Unset parent for all rate objects and destroy all rate nodes
- * on specified device.
- *
- * Context: Takes and release devlink->lock <mutex>.
- */
-void devlink_rate_nodes_destroy(struct devlink *devlink)
-{
- mutex_lock(&devlink->lock);
- devl_rate_nodes_destroy(devlink);
- mutex_unlock(&devlink->lock);
-}
-EXPORT_SYMBOL_GPL(devlink_rate_nodes_destroy);
-
-/**
* devlink_port_linecard_set - Link port with a linecard
*
* @devlink_port: devlink port
@@ -10396,25 +10381,21 @@ void devlink_linecard_deactivate(struct devlink_linecard *linecard)
}
EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
-int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
- u32 size, u16 ingress_pools_count,
- u16 egress_pools_count, u16 ingress_tc_count,
- u16 egress_tc_count)
+int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count)
{
struct devlink_sb *devlink_sb;
- int err = 0;
- mutex_lock(&devlink->lock);
- if (devlink_sb_index_exists(devlink, sb_index)) {
- err = -EEXIST;
- goto unlock;
- }
+ lockdep_assert_held(&devlink->lock);
+
+ if (devlink_sb_index_exists(devlink, sb_index))
+ return -EEXIST;
devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
- if (!devlink_sb) {
- err = -ENOMEM;
- goto unlock;
- }
+ if (!devlink_sb)
+ return -ENOMEM;
devlink_sb->index = sb_index;
devlink_sb->size = size;
devlink_sb->ingress_pools_count = ingress_pools_count;
@@ -10422,57 +10403,78 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
devlink_sb->ingress_tc_count = ingress_tc_count;
devlink_sb->egress_tc_count = egress_tc_count;
list_add_tail(&devlink_sb->list, &devlink->sb_list);
-unlock:
- mutex_unlock(&devlink->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_sb_register);
+
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_sb_register(devlink, sb_index, size, ingress_pools_count,
+ egress_pools_count, ingress_tc_count,
+ egress_tc_count);
+ devl_unlock(devlink);
return err;
}
EXPORT_SYMBOL_GPL(devlink_sb_register);
-void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index)
{
struct devlink_sb *devlink_sb;
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
+
devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
WARN_ON(!devlink_sb);
list_del(&devlink_sb->list);
- mutex_unlock(&devlink->lock);
kfree(devlink_sb);
}
+EXPORT_SYMBOL_GPL(devl_sb_unregister);
+
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+{
+ devl_lock(devlink);
+ devl_sb_unregister(devlink, sb_index);
+ devl_unlock(devlink);
+}
EXPORT_SYMBOL_GPL(devlink_sb_unregister);
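
devl_sb_register()/devl_sb_unregister() follow the same split: the devl_ variants assume the instance lock is already held, the devlink_ wrappers take it. A minimal sketch with made-up buffer size and pool/TC counts; my_drv_sb_init() is hypothetical:

	/* Hedged sketch: shared-buffer registration from the driver's own lock section. */
	static int my_drv_sb_init(struct devlink *devlink)
	{
		int err;

		devl_lock(devlink);
		err = devl_sb_register(devlink, 0, SZ_16M, 4, 4, 8, 8);
		devl_unlock(devlink);
		return err;
	}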
/**
- * devlink_dpipe_headers_register - register dpipe headers
+ * devl_dpipe_headers_register - register dpipe headers
*
- * @devlink: devlink
- * @dpipe_headers: dpipe header array
+ * @devlink: devlink
+ * @dpipe_headers: dpipe header array
*
- * Register the headers supported by hardware.
+ * Register the headers supported by hardware.
*/
-int devlink_dpipe_headers_register(struct devlink *devlink,
- struct devlink_dpipe_headers *dpipe_headers)
+void devl_dpipe_headers_register(struct devlink *devlink,
+ struct devlink_dpipe_headers *dpipe_headers)
{
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
+
devlink->dpipe_headers = dpipe_headers;
- mutex_unlock(&devlink->lock);
- return 0;
}
-EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
+EXPORT_SYMBOL_GPL(devl_dpipe_headers_register);
/**
- * devlink_dpipe_headers_unregister - unregister dpipe headers
+ * devl_dpipe_headers_unregister - unregister dpipe headers
*
- * @devlink: devlink
+ * @devlink: devlink
*
- * Unregister the headers supported by hardware.
+ * Unregister the headers supported by hardware.
*/
-void devlink_dpipe_headers_unregister(struct devlink *devlink)
+void devl_dpipe_headers_unregister(struct devlink *devlink)
{
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
+
devlink->dpipe_headers = NULL;
- mutex_unlock(&devlink->lock);
}
-EXPORT_SYMBOL_GPL(devlink_dpipe_headers_unregister);
+EXPORT_SYMBOL_GPL(devl_dpipe_headers_unregister);
/**
* devlink_dpipe_table_counter_enabled - check if counter allocation
@@ -10506,38 +10508,33 @@ bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
/**
- * devlink_dpipe_table_register - register dpipe table
+ * devl_dpipe_table_register - register dpipe table
*
- * @devlink: devlink
- * @table_name: table name
- * @table_ops: table ops
- * @priv: priv
- * @counter_control_extern: external control for counters
+ * @devlink: devlink
+ * @table_name: table name
+ * @table_ops: table ops
+ * @priv: priv
+ * @counter_control_extern: external control for counters
*/
-int devlink_dpipe_table_register(struct devlink *devlink,
- const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
- void *priv, bool counter_control_extern)
+int devl_dpipe_table_register(struct devlink *devlink,
+ const char *table_name,
+ struct devlink_dpipe_table_ops *table_ops,
+ void *priv, bool counter_control_extern)
{
struct devlink_dpipe_table *table;
- int err = 0;
+
+ lockdep_assert_held(&devlink->lock);
if (WARN_ON(!table_ops->size_get))
return -EINVAL;
- mutex_lock(&devlink->lock);
-
if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
- devlink)) {
- err = -EEXIST;
- goto unlock;
- }
+ devlink))
+ return -EEXIST;
table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table) {
- err = -ENOMEM;
- goto unlock;
- }
+ if (!table)
+ return -ENOMEM;
table->name = table_name;
table->table_ops = table_ops;
@@ -10545,77 +10542,69 @@ int devlink_dpipe_table_register(struct devlink *devlink,
table->counter_control_extern = counter_control_extern;
list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
-unlock:
- mutex_unlock(&devlink->lock);
- return err;
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
+EXPORT_SYMBOL_GPL(devl_dpipe_table_register);
/**
- * devlink_dpipe_table_unregister - unregister dpipe table
+ * devl_dpipe_table_unregister - unregister dpipe table
*
- * @devlink: devlink
- * @table_name: table name
+ * @devlink: devlink
+ * @table_name: table name
*/
-void devlink_dpipe_table_unregister(struct devlink *devlink,
- const char *table_name)
+void devl_dpipe_table_unregister(struct devlink *devlink,
+ const char *table_name)
{
struct devlink_dpipe_table *table;
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
+
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name, devlink);
if (!table)
- goto unlock;
+ return;
list_del_rcu(&table->list);
- mutex_unlock(&devlink->lock);
kfree_rcu(table, rcu);
- return;
-unlock:
- mutex_unlock(&devlink->lock);
}
-EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
+EXPORT_SYMBOL_GPL(devl_dpipe_table_unregister);
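
A hedged sketch of registering dpipe headers and a table while the instance lock is already held; the header array, ops structure and size callback are hypothetical, only the devl_* calls and the mandatory size_get come from the code above:

	/* Hedged sketch; normally filled with devlink_dpipe_header entries. */
	static struct devlink_dpipe_headers my_dpipe_headers;

	static u64 my_table_size_get(void *priv)
	{
		return 128;			/* hypothetical fixed table size */
	}

	static struct devlink_dpipe_table_ops my_table_ops = {
		.size_get = my_table_size_get,	/* required, else -EINVAL */
	};

	static int my_drv_dpipe_init(struct devlink *devlink, void *priv)
	{
		devl_assert_locked(devlink);

		devl_dpipe_headers_register(devlink, &my_dpipe_headers);
		return devl_dpipe_table_register(devlink, "my_table",
						 &my_table_ops, priv, false);
	}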
/**
- * devlink_resource_register - devlink resource register
+ * devl_resource_register - devlink resource register
*
- * @devlink: devlink
- * @resource_name: resource's name
- * @resource_size: resource's size
- * @resource_id: resource's id
- * @parent_resource_id: resource's parent id
- * @size_params: size parameters
+ * @devlink: devlink
+ * @resource_name: resource's name
+ * @resource_size: resource's size
+ * @resource_id: resource's id
+ * @parent_resource_id: resource's parent id
+ * @size_params: size parameters
*
- * Generic resources should reuse the same names across drivers.
- * Please see the generic resources list at:
- * Documentation/networking/devlink/devlink-resource.rst
+ * Generic resources should reuse the same names across drivers.
+ * Please see the generic resources list at:
+ * Documentation/networking/devlink/devlink-resource.rst
*/
-int devlink_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params)
+int devl_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
{
struct devlink_resource *resource;
struct list_head *resource_list;
bool top_hierarchy;
- int err = 0;
+
+ lockdep_assert_held(&devlink->lock);
top_hierarchy = parent_resource_id == DEVLINK_RESOURCE_ID_PARENT_TOP;
- mutex_lock(&devlink->lock);
resource = devlink_resource_find(devlink, NULL, resource_id);
- if (resource) {
- err = -EINVAL;
- goto out;
- }
+ if (resource)
+ return -EINVAL;
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
- if (!resource) {
- err = -ENOMEM;
- goto out;
- }
+ if (!resource)
+ return -ENOMEM;
if (top_hierarchy) {
resource_list = &devlink->resource_list;
@@ -10629,8 +10618,7 @@ int devlink_resource_register(struct devlink *devlink,
resource->parent = parent_resource;
} else {
kfree(resource);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
@@ -10643,8 +10631,40 @@ int devlink_resource_register(struct devlink *devlink,
sizeof(resource->size_params));
INIT_LIST_HEAD(&resource->resource_list);
list_add_tail(&resource->list, resource_list);
-out:
- mutex_unlock(&devlink->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_resource_register);
+
+/**
+ * devlink_resource_register - devlink resource register
+ *
+ * @devlink: devlink
+ * @resource_name: resource's name
+ * @resource_size: resource's size
+ * @resource_id: resource's id
+ * @parent_resource_id: resource's parent id
+ * @size_params: size parameters
+ *
+ * Generic resources should reuse the same names across drivers.
+ * Please see the generic resources list at:
+ * Documentation/networking/devlink/devlink-resource.rst
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+int devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_resource_register(devlink, resource_name, resource_size,
+ resource_id, parent_resource_id, size_params);
+ devl_unlock(devlink);
return err;
}
EXPORT_SYMBOL_GPL(devlink_resource_register);
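
A hedged sketch of building a small resource hierarchy with devl_resource_register() while the caller already holds the instance lock; the names, ids and sizes are invented for illustration:

	/* Hedged sketch; ids and sizes are hypothetical. */
	enum { MY_RES_KVD = 1, MY_RES_KVD_LINEAR = 2 };

	static int my_drv_resources_register(struct devlink *devlink)
	{
		struct devlink_resource_size_params params;
		int err;

		devl_assert_locked(devlink);

		devlink_resource_size_params_init(&params, 0, SZ_1M, SZ_4K,
						  DEVLINK_RESOURCE_UNIT_ENTRY);

		err = devl_resource_register(devlink, "kvd", SZ_1M, MY_RES_KVD,
					     DEVLINK_RESOURCE_ID_PARENT_TOP,
					     &params);
		if (err)
			return err;

		return devl_resource_register(devlink, "kvd_linear", SZ_256K,
					      MY_RES_KVD_LINEAR, MY_RES_KVD,
					      &params);
	}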
@@ -10663,15 +10683,15 @@ static void devlink_resource_unregister(struct devlink *devlink,
}
/**
- * devlink_resources_unregister - free all resources
+ * devl_resources_unregister - free all resources
*
- * @devlink: devlink
+ * @devlink: devlink
*/
-void devlink_resources_unregister(struct devlink *devlink)
+void devl_resources_unregister(struct devlink *devlink)
{
struct devlink_resource *tmp, *child_resource;
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list,
list) {
@@ -10679,69 +10699,100 @@ void devlink_resources_unregister(struct devlink *devlink)
list_del(&child_resource->list);
kfree(child_resource);
}
+}
+EXPORT_SYMBOL_GPL(devl_resources_unregister);
- mutex_unlock(&devlink->lock);
+/**
+ * devlink_resources_unregister - free all resources
+ *
+ * @devlink: devlink
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_resources_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ devl_resources_unregister(devlink);
+ devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_resources_unregister);
/**
- * devlink_resource_size_get - get and update size
+ * devl_resource_size_get - get and update size
*
- * @devlink: devlink
- * @resource_id: the requested resource id
- * @p_resource_size: ptr to update
+ * @devlink: devlink
+ * @resource_id: the requested resource id
+ * @p_resource_size: ptr to update
*/
-int devlink_resource_size_get(struct devlink *devlink,
- u64 resource_id,
- u64 *p_resource_size)
+int devl_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size)
{
struct devlink_resource *resource;
- int err = 0;
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
+
resource = devlink_resource_find(devlink, NULL, resource_id);
- if (!resource) {
- err = -EINVAL;
- goto out;
- }
+ if (!resource)
+ return -EINVAL;
*p_resource_size = resource->size_new;
resource->size = resource->size_new;
-out:
- mutex_unlock(&devlink->lock);
- return err;
+ return 0;
}
-EXPORT_SYMBOL_GPL(devlink_resource_size_get);
+EXPORT_SYMBOL_GPL(devl_resource_size_get);
/**
- * devlink_dpipe_table_resource_set - set the resource id
+ * devl_dpipe_table_resource_set - set the resource id
*
- * @devlink: devlink
- * @table_name: table name
- * @resource_id: resource id
- * @resource_units: number of resource's units consumed per table's entry
+ * @devlink: devlink
+ * @table_name: table name
+ * @resource_id: resource id
+ * @resource_units: number of resource's units consumed per table's entry
*/
-int devlink_dpipe_table_resource_set(struct devlink *devlink,
- const char *table_name, u64 resource_id,
- u64 resource_units)
+int devl_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units)
{
struct devlink_dpipe_table *table;
- int err = 0;
- mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name, devlink);
- if (!table) {
- err = -EINVAL;
- goto out;
- }
+ if (!table)
+ return -EINVAL;
+
table->resource_id = resource_id;
table->resource_units = resource_units;
table->resource_valid = true;
-out:
- mutex_unlock(&devlink->lock);
- return err;
+ return 0;
}
-EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
+EXPORT_SYMBOL_GPL(devl_dpipe_table_resource_set);
+
+/**
+ * devl_resource_occ_get_register - register occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: occupancy getter callback priv
+ */
+void devl_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+ struct devlink_resource *resource;
+
+ lockdep_assert_held(&devlink->lock);
+
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (WARN_ON(!resource))
+ return;
+ WARN_ON(resource->occ_get);
+
+ resource->occ_get = occ_get;
+ resource->occ_get_priv = occ_get_priv;
+}
+EXPORT_SYMBOL_GPL(devl_resource_occ_get_register);
/**
* devlink_resource_occ_get_register - register occupancy getter
@@ -10750,48 +10801,58 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
* @resource_id: resource id
* @occ_get: occupancy getter callback
* @occ_get_priv: occupancy getter callback priv
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
*/
void devlink_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv)
{
+ devl_lock(devlink);
+ devl_resource_occ_get_register(devlink, resource_id,
+ occ_get, occ_get_priv);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
+
+/**
+ * devl_resource_occ_get_unregister - unregister occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ */
+void devl_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id)
+{
struct devlink_resource *resource;
- mutex_lock(&devlink->lock);
+ lockdep_assert_held(&devlink->lock);
+
resource = devlink_resource_find(devlink, NULL, resource_id);
if (WARN_ON(!resource))
- goto out;
- WARN_ON(resource->occ_get);
+ return;
+ WARN_ON(!resource->occ_get);
- resource->occ_get = occ_get;
- resource->occ_get_priv = occ_get_priv;
-out:
- mutex_unlock(&devlink->lock);
+ resource->occ_get = NULL;
+ resource->occ_get_priv = NULL;
}
-EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
+EXPORT_SYMBOL_GPL(devl_resource_occ_get_unregister);
/**
* devlink_resource_occ_get_unregister - unregister occupancy getter
*
* @devlink: devlink
* @resource_id: resource id
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
*/
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id)
{
- struct devlink_resource *resource;
-
- mutex_lock(&devlink->lock);
- resource = devlink_resource_find(devlink, NULL, resource_id);
- if (WARN_ON(!resource))
- goto out;
- WARN_ON(!resource->occ_get);
-
- resource->occ_get = NULL;
- resource->occ_get_priv = NULL;
-out:
- mutex_unlock(&devlink->lock);
+ devl_lock(devlink);
+ devl_resource_occ_get_unregister(devlink, resource_id);
+ devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
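
Occupancy getters get the same treatment: devl_resource_occ_get_register() for callers already under the lock, the devlink_ wrapper otherwise. A sketch wiring a hypothetical counter to the MY_RES_KVD id from the previous sketch; struct my_drv and the field it reads are assumptions:

	/* Hedged sketch of an occupancy callback; called with the lock held. */
	struct my_drv {
		u64 kvd_entries_used;
	};

	static u64 my_res_occ_get(void *priv)
	{
		struct my_drv *drv = priv;

		return drv->kvd_entries_used;
	}

	static void my_drv_res_occ_init(struct devlink *devlink, struct my_drv *drv)
	{
		devl_assert_locked(devlink);
		devl_resource_occ_get_register(devlink, MY_RES_KVD,
					       my_res_occ_get, drv);
	}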
@@ -11012,36 +11073,31 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
EXPORT_SYMBOL_GPL(devlink_param_value_changed);
/**
- * devlink_region_create - create a new address region
+ * devl_region_create - create a new address region
*
- * @devlink: devlink
- * @ops: region operations and name
- * @region_max_snapshots: Maximum supported number of snapshots for region
- * @region_size: size of region
+ * @devlink: devlink
+ * @ops: region operations and name
+ * @region_max_snapshots: Maximum supported number of snapshots for region
+ * @region_size: size of region
*/
-struct devlink_region *
-devlink_region_create(struct devlink *devlink,
- const struct devlink_region_ops *ops,
- u32 region_max_snapshots, u64 region_size)
+struct devlink_region *devl_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots,
+ u64 region_size)
{
struct devlink_region *region;
- int err = 0;
+
+ devl_assert_locked(devlink);
if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
return ERR_PTR(-EINVAL);
- mutex_lock(&devlink->lock);
-
- if (devlink_region_get_by_name(devlink, ops->name)) {
- err = -EEXIST;
- goto unlock;
- }
+ if (devlink_region_get_by_name(devlink, ops->name))
+ return ERR_PTR(-EEXIST);
region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region) {
- err = -ENOMEM;
- goto unlock;
- }
+ if (!region)
+ return ERR_PTR(-ENOMEM);
region->devlink = devlink;
region->max_snapshots = region_max_snapshots;
@@ -11051,12 +11107,32 @@ devlink_region_create(struct devlink *devlink,
list_add_tail(&region->list, &devlink->region_list);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
- mutex_unlock(&devlink->lock);
return region;
+}
+EXPORT_SYMBOL_GPL(devl_region_create);
-unlock:
- mutex_unlock(&devlink->lock);
- return ERR_PTR(err);
+/**
+ * devlink_region_create - create a new address region
+ *
+ * @devlink: devlink
+ * @ops: region operations and name
+ * @region_max_snapshots: Maximum supported number of snapshots for region
+ * @region_size: size of region
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ struct devlink_region *region;
+
+ devl_lock(devlink);
+ region = devl_region_create(devlink, ops, region_max_snapshots,
+ region_size);
+ devl_unlock(devlink);
+ return region;
}
EXPORT_SYMBOL_GPL(devlink_region_create);
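
A hedged sketch of a driver creating a region from a path that takes the instance lock itself; my_region_ops, the snapshot count and the size are hypothetical, and the destructor is mandatory because of the WARN_ON above:

	/* Hedged sketch of devl_region_create() usage. */
	static const struct devlink_region_ops my_region_ops = {
		.name = "my-region",
		.destructor = kfree,
	};

	static struct devlink_region *my_drv_region_setup(struct devlink *devlink)
	{
		struct devlink_region *region;

		devl_lock(devlink);
		region = devl_region_create(devlink, &my_region_ops, 8, SZ_64K);
		devl_unlock(devlink);
		return region;
	}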
@@ -11067,6 +11143,8 @@ EXPORT_SYMBOL_GPL(devlink_region_create);
* @ops: region operations and name
* @region_max_snapshots: Maximum supported number of snapshots for region
* @region_size: size of region
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
*/
struct devlink_region *
devlink_port_region_create(struct devlink_port *port,
@@ -11080,7 +11158,7 @@ devlink_port_region_create(struct devlink_port *port,
if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
return ERR_PTR(-EINVAL);
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
if (devlink_port_region_get_by_name(port, ops->name)) {
err = -EEXIST;
@@ -11102,26 +11180,26 @@ devlink_port_region_create(struct devlink_port *port,
list_add_tail(&region->list, &port->region_list);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
return region;
unlock:
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(devlink_port_region_create);
/**
- * devlink_region_destroy - destroy address region
+ * devl_region_destroy - destroy address region
*
- * @region: devlink region to destroy
+ * @region: devlink region to destroy
*/
-void devlink_region_destroy(struct devlink_region *region)
+void devl_region_destroy(struct devlink_region *region)
{
struct devlink *devlink = region->devlink;
struct devlink_snapshot *snapshot, *ts;
- mutex_lock(&devlink->lock);
+ devl_assert_locked(devlink);
/* Free all snapshots of region */
list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
@@ -11130,9 +11208,25 @@ void devlink_region_destroy(struct devlink_region *region)
list_del(&region->list);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
- mutex_unlock(&devlink->lock);
kfree(region);
}
+EXPORT_SYMBOL_GPL(devl_region_destroy);
+
+/**
+ * devlink_region_destroy - destroy address region
+ *
+ * @region: devlink region to destroy
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_region_destroy(struct devlink_region *region)
+{
+ struct devlink *devlink = region->devlink;
+
+ devl_lock(devlink);
+ devl_region_destroy(region);
+ devl_unlock(devlink);
+}
EXPORT_SYMBOL_GPL(devlink_region_destroy);
/**
@@ -11154,9 +11248,9 @@ int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
{
int err;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
err = __devlink_region_snapshot_id_get(devlink, id);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
return err;
}
@@ -11174,9 +11268,9 @@ EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
*/
void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
{
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
__devlink_snapshot_id_decrement(devlink, id);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
@@ -11198,9 +11292,9 @@ int devlink_region_snapshot_create(struct devlink_region *region,
struct devlink *devlink = region->devlink;
int err;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
err = __devlink_region_snapshot_create(region, data, snapshot_id);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
return err;
}
@@ -11566,7 +11660,7 @@ static void devlink_trap_disable(struct devlink *devlink,
}
/**
- * devlink_traps_register - Register packet traps with devlink.
+ * devl_traps_register - Register packet traps with devlink.
* @devlink: devlink.
* @traps: Packet traps.
* @traps_count: Count of provided packet traps.
@@ -11574,16 +11668,16 @@ static void devlink_trap_disable(struct devlink *devlink,
*
* Return: Non-zero value on failure.
*/
-int devlink_traps_register(struct devlink *devlink,
- const struct devlink_trap *traps,
- size_t traps_count, void *priv)
+int devl_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv)
{
int i, err;
if (!devlink->ops->trap_init || !devlink->ops->trap_action_set)
return -EINVAL;
- mutex_lock(&devlink->lock);
+ devl_assert_locked(devlink);
for (i = 0; i < traps_count; i++) {
const struct devlink_trap *trap = &traps[i];
@@ -11595,7 +11689,6 @@ int devlink_traps_register(struct devlink *devlink,
if (err)
goto err_trap_register;
}
- mutex_unlock(&devlink->lock);
return 0;
@@ -11603,24 +11696,47 @@ err_trap_register:
err_trap_verify:
for (i--; i >= 0; i--)
devlink_trap_unregister(devlink, &traps[i]);
- mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devl_traps_register);
+
+/**
+ * devlink_traps_register - Register packet traps with devlink.
+ * @devlink: devlink.
+ * @traps: Packet traps.
+ * @traps_count: Count of provided packet traps.
+ * @priv: Driver private information.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ *
+ * Return: Non-zero value on failure.
+ */
+int devlink_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_traps_register(devlink, traps, traps_count, priv);
+ devl_unlock(devlink);
return err;
}
EXPORT_SYMBOL_GPL(devlink_traps_register);
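
Trap registration can now also live inside the driver's own locked section. A sketch with a single generic trap; the array is hypothetical, assumes the devlink ops implement trap_init and trap_action_set as required above, and assumes the L2_DROPS group was registered earlier with devl_trap_groups_register():

	/* Hedged sketch of devl_traps_register() usage. */
	static const struct devlink_trap my_traps[] = {
		DEVLINK_TRAP_GENERIC(DROP, DROP, SMAC_MC,
				     DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS, 0),
	};

	static int my_drv_traps_init(struct devlink *devlink, void *priv)
	{
		int err;

		devl_lock(devlink);
		err = devl_traps_register(devlink, my_traps,
					  ARRAY_SIZE(my_traps), priv);
		devl_unlock(devlink);
		return err;
	}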
/**
- * devlink_traps_unregister - Unregister packet traps from devlink.
+ * devl_traps_unregister - Unregister packet traps from devlink.
* @devlink: devlink.
* @traps: Packet traps.
* @traps_count: Count of provided packet traps.
*/
-void devlink_traps_unregister(struct devlink *devlink,
- const struct devlink_trap *traps,
- size_t traps_count)
+void devl_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count)
{
int i;
- mutex_lock(&devlink->lock);
+ devl_assert_locked(devlink);
/* Make sure we do not have any packets in-flight while unregistering
* traps by disabling all of them and waiting for a grace period.
*/
@@ -11629,7 +11745,24 @@ void devlink_traps_unregister(struct devlink *devlink,
synchronize_rcu();
for (i = traps_count - 1; i >= 0; i--)
devlink_trap_unregister(devlink, &traps[i]);
- mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_traps_unregister);
+
+/**
+ * devlink_traps_unregister - Unregister packet traps from devlink.
+ * @devlink: devlink.
+ * @traps: Packet traps.
+ * @traps_count: Count of provided packet traps.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count)
+{
+ devl_lock(devlink);
+ devl_traps_unregister(devlink, traps, traps_count);
+ devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_traps_unregister);
@@ -11641,8 +11774,8 @@ devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
stats = this_cpu_ptr(trap_stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_bytes += skb_len;
- stats->rx_packets++;
+ u64_stats_add(&stats->rx_bytes, skb_len);
+ u64_stats_inc(&stats->rx_packets);
u64_stats_update_end(&stats->syncp);
}
@@ -11788,20 +11921,20 @@ devlink_trap_group_unregister(struct devlink *devlink,
}
/**
- * devlink_trap_groups_register - Register packet trap groups with devlink.
+ * devl_trap_groups_register - Register packet trap groups with devlink.
* @devlink: devlink.
* @groups: Packet trap groups.
* @groups_count: Count of provided packet trap groups.
*
* Return: Non-zero value on failure.
*/
-int devlink_trap_groups_register(struct devlink *devlink,
- const struct devlink_trap_group *groups,
- size_t groups_count)
+int devl_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
{
int i, err;
- mutex_lock(&devlink->lock);
+ devl_assert_locked(devlink);
for (i = 0; i < groups_count; i++) {
const struct devlink_trap_group *group = &groups[i];
@@ -11813,7 +11946,6 @@ int devlink_trap_groups_register(struct devlink *devlink,
if (err)
goto err_trap_group_register;
}
- mutex_unlock(&devlink->lock);
return 0;
@@ -11821,27 +11953,66 @@ err_trap_group_register:
err_trap_group_verify:
for (i--; i >= 0; i--)
devlink_trap_group_unregister(devlink, &groups[i]);
- mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devl_trap_groups_register);
+
+/**
+ * devlink_trap_groups_register - Register packet trap groups with devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ *
+ * Return: Non-zero value on failure.
+ */
+int devlink_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_trap_groups_register(devlink, groups, groups_count);
+ devl_unlock(devlink);
return err;
}
EXPORT_SYMBOL_GPL(devlink_trap_groups_register);
/**
- * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
+ * devl_trap_groups_unregister - Unregister packet trap groups from devlink.
* @devlink: devlink.
* @groups: Packet trap groups.
* @groups_count: Count of provided packet trap groups.
*/
-void devlink_trap_groups_unregister(struct devlink *devlink,
- const struct devlink_trap_group *groups,
- size_t groups_count)
+void devl_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
{
int i;
- mutex_lock(&devlink->lock);
+ devl_assert_locked(devlink);
for (i = groups_count - 1; i >= 0; i--)
devlink_trap_group_unregister(devlink, &groups[i]);
- mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_trap_groups_unregister);
+
+/**
+ * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ devl_lock(devlink);
+ devl_trap_groups_unregister(devlink, groups, groups_count);
+ devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_trap_groups_unregister);
@@ -11927,7 +12098,7 @@ devlink_trap_policer_unregister(struct devlink *devlink,
}
/**
- * devlink_trap_policers_register - Register packet trap policers with devlink.
+ * devl_trap_policers_register - Register packet trap policers with devlink.
* @devlink: devlink.
* @policers: Packet trap policers.
* @policers_count: Count of provided packet trap policers.
@@ -11935,13 +12106,13 @@ devlink_trap_policer_unregister(struct devlink *devlink,
* Return: Non-zero value on failure.
*/
int
-devlink_trap_policers_register(struct devlink *devlink,
- const struct devlink_trap_policer *policers,
- size_t policers_count)
+devl_trap_policers_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count)
{
int i, err;
- mutex_lock(&devlink->lock);
+ devl_assert_locked(devlink);
for (i = 0; i < policers_count; i++) {
const struct devlink_trap_policer *policer = &policers[i];
@@ -11956,38 +12127,34 @@ devlink_trap_policers_register(struct devlink *devlink,
if (err)
goto err_trap_policer_register;
}
- mutex_unlock(&devlink->lock);
-
return 0;
err_trap_policer_register:
err_trap_policer_verify:
for (i--; i >= 0; i--)
devlink_trap_policer_unregister(devlink, &policers[i]);
- mutex_unlock(&devlink->lock);
return err;
}
-EXPORT_SYMBOL_GPL(devlink_trap_policers_register);
+EXPORT_SYMBOL_GPL(devl_trap_policers_register);
/**
- * devlink_trap_policers_unregister - Unregister packet trap policers from devlink.
+ * devl_trap_policers_unregister - Unregister packet trap policers from devlink.
* @devlink: devlink.
* @policers: Packet trap policers.
* @policers_count: Count of provided packet trap policers.
*/
void
-devlink_trap_policers_unregister(struct devlink *devlink,
- const struct devlink_trap_policer *policers,
- size_t policers_count)
+devl_trap_policers_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count)
{
int i;
- mutex_lock(&devlink->lock);
+ devl_assert_locked(devlink);
for (i = policers_count - 1; i >= 0; i--)
devlink_trap_policer_unregister(devlink, &policers[i]);
- mutex_unlock(&devlink->lock);
}
-EXPORT_SYMBOL_GPL(devlink_trap_policers_unregister);
+EXPORT_SYMBOL_GPL(devl_trap_policers_unregister);
static void __devlink_compat_running_version(struct devlink *devlink,
char *buf, size_t len)
@@ -12039,9 +12206,9 @@ void devlink_compat_running_version(struct devlink *devlink,
if (!devlink->ops->info_get)
return;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
__devlink_compat_running_version(devlink, buf, len);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
}
int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
@@ -12056,11 +12223,11 @@ int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
if (ret)
return ret;
- mutex_lock(&devlink->lock);
+ devl_lock(devlink);
devlink_flash_update_begin_notify(devlink);
ret = devlink->ops->flash_update(devlink, &params, NULL);
devlink_flash_update_end_notify(devlink);
- mutex_unlock(&devlink->lock);
+ devl_unlock(devlink);
release_firmware(params.fw);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 41cac0e4834e..75501e1bdd25 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -48,19 +48,6 @@
static int trace_state = TRACE_OFF;
static bool monitor_hw;
-#undef EM
-#undef EMe
-
-#define EM(a, b) [a] = #b,
-#define EMe(a, b) [a] = #b
-
-/* drop_reasons is used to translate 'enum skb_drop_reason' to string,
- * which is reported to user space.
- */
-static const char * const drop_reasons[] = {
- TRACE_SKB_DROP_REASON
-};
-
/* net_dm_mutex
*
* An overall lock guarding every operation coming from userspace.
@@ -68,7 +55,7 @@ static const char * const drop_reasons[] = {
static DEFINE_MUTEX(net_dm_mutex);
struct net_dm_stats {
- u64 dropped;
+ u64_stats_t dropped;
struct u64_stats_sync syncp;
};
@@ -543,7 +530,7 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
unlock_free:
spin_unlock_irqrestore(&data->drop_queue.lock, flags);
u64_stats_update_begin(&data->stats.syncp);
- data->stats.dropped++;
+ u64_stats_inc(&data->stats.dropped);
u64_stats_update_end(&data->stats.syncp);
consume_skb(nskb);
}
@@ -877,7 +864,8 @@ net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata)
}
hw_metadata->input_dev = metadata->input_dev;
- dev_hold_track(hw_metadata->input_dev, &hw_metadata->dev_tracker, GFP_ATOMIC);
+ netdev_hold(hw_metadata->input_dev, &hw_metadata->dev_tracker,
+ GFP_ATOMIC);
return hw_metadata;
@@ -893,7 +881,7 @@ free_hw_metadata:
static void
net_dm_hw_metadata_free(struct devlink_trap_metadata *hw_metadata)
{
- dev_put_track(hw_metadata->input_dev, &hw_metadata->dev_tracker);
+ netdev_put(hw_metadata->input_dev, &hw_metadata->dev_tracker);
kfree(hw_metadata->fa_cookie);
kfree(hw_metadata->trap_name);
kfree(hw_metadata->trap_group_name);
@@ -998,7 +986,7 @@ net_dm_hw_trap_packet_probe(void *ignore, const struct devlink *devlink,
unlock_free:
spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
u64_stats_update_begin(&hw_data->stats.syncp);
- hw_data->stats.dropped++;
+ u64_stats_inc(&hw_data->stats.dropped);
u64_stats_update_end(&hw_data->stats.syncp);
net_dm_hw_metadata_free(n_hw_metadata);
free:
@@ -1445,10 +1433,10 @@ static void net_dm_stats_read(struct net_dm_stats *stats)
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- dropped = cpu_stats->dropped;
+ dropped = u64_stats_read(&cpu_stats->dropped);
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
- stats->dropped += dropped;
+ u64_stats_add(&stats->dropped, dropped);
}
}
@@ -1464,7 +1452,7 @@ static int net_dm_stats_put(struct sk_buff *msg)
return -EMSGSIZE;
if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
- stats.dropped, NET_DM_ATTR_PAD))
+ u64_stats_read(&stats.dropped), NET_DM_ATTR_PAD))
goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -1489,10 +1477,10 @@ static void net_dm_hw_stats_read(struct net_dm_stats *stats)
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- dropped = cpu_stats->dropped;
+ dropped = u64_stats_read(&cpu_stats->dropped);
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
- stats->dropped += dropped;
+ u64_stats_add(&stats->dropped, dropped);
}
}
@@ -1508,7 +1496,7 @@ static int net_dm_hw_stats_put(struct sk_buff *msg)
return -EMSGSIZE;
if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
- stats.dropped, NET_DM_ATTR_PAD))
+ u64_stats_read(&stats.dropped), NET_DM_ATTR_PAD))
goto nla_put_failure;
nla_nest_end(msg, attr);
diff --git a/net/core/dst.c b/net/core/dst.c
index d16c2c9bfebd..bc9c9be4e080 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -49,7 +49,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
unsigned short flags)
{
dst->dev = dev;
- dev_hold_track(dev, &dst->dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &dst->dev_tracker, GFP_ATOMIC);
dst->ops = ops;
dst_init_metrics(dst, dst_default_metrics.metrics, true);
dst->expires = 0UL;
@@ -117,7 +117,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
if (dst->ops->destroy)
dst->ops->destroy(dst);
- dev_put_track(dst->dev, &dst->dev_tracker);
+ netdev_put(dst->dev, &dst->dev_tracker);
lwtstate_put(dst->lwtstate);
@@ -159,8 +159,8 @@ void dst_dev_put(struct dst_entry *dst)
dst->input = dst_discard;
dst->output = dst_discard_out;
dst->dev = blackhole_netdev;
- dev_replace_track(dev, blackhole_netdev, &dst->dev_tracker,
- GFP_ATOMIC);
+ netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
+ GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);
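
The dst.c hunks (and the files below) only rename the tracked reference helpers: dev_hold_track()/dev_put_track()/dev_replace_track() become netdev_hold()/netdev_put()/netdev_ref_replace() with unchanged semantics. A minimal sketch of the pattern on a hypothetical holder object:

	/* Hedged sketch of a tracked netdev reference; "struct my_holder" is hypothetical. */
	struct my_holder {
		struct net_device *dev;
		netdevice_tracker dev_tracker;
	};

	static void my_holder_attach(struct my_holder *h, struct net_device *dev)
	{
		h->dev = dev;
		netdev_hold(dev, &h->dev_tracker, GFP_KERNEL);
	}

	static void my_holder_detach(struct my_holder *h)
	{
		netdev_put(h->dev, &h->dev_tracker);
		h->dev = NULL;
	}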
diff --git a/net/core/failover.c b/net/core/failover.c
index dcaa92a85ea2..864d2d83eff4 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -252,7 +252,7 @@ struct failover *failover_register(struct net_device *dev,
return ERR_PTR(-ENOMEM);
rcu_assign_pointer(failover->ops, ops);
- dev_hold_track(dev, &failover->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &failover->dev_tracker, GFP_KERNEL);
dev->priv_flags |= IFF_FAILOVER;
rcu_assign_pointer(failover->failover_dev, dev);
@@ -285,7 +285,7 @@ void failover_unregister(struct failover *failover)
failover_dev->name);
failover_dev->priv_flags &= ~IFF_FAILOVER;
- dev_put_track(failover_dev, &failover->dev_tracker);
+ netdev_put(failover_dev, &failover->dev_tracker);
spin_lock(&failover_lock);
list_del(&failover->list);
diff --git a/net/core/filter.c b/net/core/filter.c
index 7950f7520765..a0c61094aeac 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5012,8 +5012,8 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-static int _bpf_setsockopt(struct sock *sk, int level, int optname,
- char *optval, int optlen)
+static int __bpf_setsockopt(struct sock *sk, int level, int optname,
+ char *optval, int optlen)
{
char devname[IFNAMSIZ];
int val, valbool;
@@ -5024,8 +5024,6 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
if (!sk_fullsock(sk))
return -EINVAL;
- sock_owned_by_me(sk);
-
if (level == SOL_SOCKET) {
if (optlen != sizeof(int) && optname != SO_BINDTODEVICE)
return -EINVAL;
@@ -5258,14 +5256,20 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
return ret;
}
-static int _bpf_getsockopt(struct sock *sk, int level, int optname,
+static int _bpf_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{
+ if (sk_fullsock(sk))
+ sock_owned_by_me(sk);
+ return __bpf_setsockopt(sk, level, optname, optval, optlen);
+}
+
+static int __bpf_getsockopt(struct sock *sk, int level, int optname,
+ char *optval, int optlen)
+{
if (!sk_fullsock(sk))
goto err_clear;
- sock_owned_by_me(sk);
-
if (level == SOL_SOCKET) {
if (optlen != sizeof(int))
goto err_clear;
@@ -5360,6 +5364,14 @@ err_clear:
return -EINVAL;
}
+static int _bpf_getsockopt(struct sock *sk, int level, int optname,
+ char *optval, int optlen)
+{
+ if (sk_fullsock(sk))
+ sock_owned_by_me(sk);
+ return __bpf_getsockopt(sk, level, optname, optval, optlen);
+}
+
BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
@@ -5400,6 +5412,40 @@ const struct bpf_func_proto bpf_sk_getsockopt_proto = {
.arg5_type = ARG_CONST_SIZE,
};
+BPF_CALL_5(bpf_unlocked_sk_setsockopt, struct sock *, sk, int, level,
+ int, optname, char *, optval, int, optlen)
+{
+ return __bpf_setsockopt(sk, level, optname, optval, optlen);
+}
+
+const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto = {
+ .func = bpf_unlocked_sk_setsockopt,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg5_type = ARG_CONST_SIZE,
+};
+
+BPF_CALL_5(bpf_unlocked_sk_getsockopt, struct sock *, sk, int, level,
+ int, optname, char *, optval, int, optlen)
+{
+ return __bpf_getsockopt(sk, level, optname, optval, optlen);
+}
+
+const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto = {
+ .func = bpf_unlocked_sk_getsockopt,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg5_type = ARG_CONST_SIZE,
+};
+
BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{
@@ -6462,8 +6508,6 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
/* bpf_skc_lookup performs the core lookup for different types of sockets,
* taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
- * Returns the socket as an 'unsigned long' to simplify the casting in the
- * callers to satisfy BPF_CALL declarations.
*/
static struct sock *
__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
@@ -6471,8 +6515,8 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
u64 flags)
{
struct sock *sk = NULL;
- u8 family = AF_UNSPEC;
struct net *net;
+ u8 family;
int sdif;
if (len == sizeof(tuple->ipv4))
@@ -6482,8 +6526,7 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
else
return NULL;
- if (unlikely(family == AF_UNSPEC || flags ||
- !((s32)netns_id < 0 || netns_id <= S32_MAX)))
+ if (unlikely(flags || !((s32)netns_id < 0 || netns_id <= S32_MAX)))
goto out;
if (family == AF_INET)
@@ -7465,6 +7508,114 @@ static const struct bpf_func_proto bpf_skb_set_tstamp_proto = {
.arg3_type = ARG_ANYTHING,
};
+#ifdef CONFIG_SYN_COOKIES
+BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv4, struct iphdr *, iph,
+ struct tcphdr *, th, u32, th_len)
+{
+ u32 cookie;
+ u16 mss;
+
+ if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
+ return -EINVAL;
+
+ mss = tcp_parse_mss_option(th, 0) ?: TCP_MSS_DEFAULT;
+ cookie = __cookie_v4_init_sequence(iph, th, &mss);
+
+ return cookie | ((u64)mss << 32);
+}
+
+static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv4_proto = {
+ .func = bpf_tcp_raw_gen_syncookie_ipv4,
+ .gpl_only = true, /* __cookie_v4_init_sequence() is GPL */
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
+ .arg1_size = sizeof(struct iphdr),
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+};
+
+BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv6, struct ipv6hdr *, iph,
+ struct tcphdr *, th, u32, th_len)
+{
+#if IS_BUILTIN(CONFIG_IPV6)
+ const u16 mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
+ sizeof(struct ipv6hdr);
+ u32 cookie;
+ u16 mss;
+
+ if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
+ return -EINVAL;
+
+ mss = tcp_parse_mss_option(th, 0) ?: mss_clamp;
+ cookie = __cookie_v6_init_sequence(iph, th, &mss);
+
+ return cookie | ((u64)mss << 32);
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv6_proto = {
+ .func = bpf_tcp_raw_gen_syncookie_ipv6,
+ .gpl_only = true, /* __cookie_v6_init_sequence() is GPL */
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
+ .arg1_size = sizeof(struct ipv6hdr),
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+};
+
+BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv4, struct iphdr *, iph,
+ struct tcphdr *, th)
+{
+ u32 cookie = ntohl(th->ack_seq) - 1;
+
+ if (__cookie_v4_check(iph, th, cookie) > 0)
+ return 0;
+
+ return -EACCES;
+}
+
+static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv4_proto = {
+ .func = bpf_tcp_raw_check_syncookie_ipv4,
+ .gpl_only = true, /* __cookie_v4_check is GPL */
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
+ .arg1_size = sizeof(struct iphdr),
+ .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM,
+ .arg2_size = sizeof(struct tcphdr),
+};
+
+BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv6, struct ipv6hdr *, iph,
+ struct tcphdr *, th)
+{
+#if IS_BUILTIN(CONFIG_IPV6)
+ u32 cookie = ntohl(th->ack_seq) - 1;
+
+ if (__cookie_v6_check(iph, th, cookie) > 0)
+ return 0;
+
+ return -EACCES;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = {
+ .func = bpf_tcp_raw_check_syncookie_ipv6,
+ .gpl_only = true, /* __cookie_v6_check is GPL */
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
+ .arg1_size = sizeof(struct ipv6hdr),
+ .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM,
+ .arg2_size = sizeof(struct tcphdr),
+};
+#endif /* CONFIG_SYN_COOKIES */
+
#endif /* CONFIG_INET */
bool bpf_helper_changes_pkt_data(void *func)
@@ -7828,6 +7979,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_assign_proto;
case BPF_FUNC_skb_set_tstamp:
return &bpf_skb_set_tstamp_proto;
+#ifdef CONFIG_SYN_COOKIES
+ case BPF_FUNC_tcp_raw_gen_syncookie_ipv4:
+ return &bpf_tcp_raw_gen_syncookie_ipv4_proto;
+ case BPF_FUNC_tcp_raw_gen_syncookie_ipv6:
+ return &bpf_tcp_raw_gen_syncookie_ipv6_proto;
+ case BPF_FUNC_tcp_raw_check_syncookie_ipv4:
+ return &bpf_tcp_raw_check_syncookie_ipv4_proto;
+ case BPF_FUNC_tcp_raw_check_syncookie_ipv6:
+ return &bpf_tcp_raw_check_syncookie_ipv6_proto;
+#endif
#endif
default:
return bpf_sk_base_func_proto(func_id);
@@ -7877,6 +8038,16 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_check_syncookie_proto;
case BPF_FUNC_tcp_gen_syncookie:
return &bpf_tcp_gen_syncookie_proto;
+#ifdef CONFIG_SYN_COOKIES
+ case BPF_FUNC_tcp_raw_gen_syncookie_ipv4:
+ return &bpf_tcp_raw_gen_syncookie_ipv4_proto;
+ case BPF_FUNC_tcp_raw_gen_syncookie_ipv6:
+ return &bpf_tcp_raw_gen_syncookie_ipv6_proto;
+ case BPF_FUNC_tcp_raw_check_syncookie_ipv4:
+ return &bpf_tcp_raw_check_syncookie_ipv4_proto;
+ case BPF_FUNC_tcp_raw_check_syncookie_ipv6:
+ return &bpf_tcp_raw_check_syncookie_ipv6_proto;
+#endif
#endif
default:
return bpf_sk_base_func_proto(func_id);
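
The four raw syncookie helpers are exposed to both TC and XDP programs, letting them issue and validate SYN cookies without a listening-socket lookup. A hedged sketch of the IPv4 check side from an XDP program; it assumes a libbpf recent enough to declare bpf_tcp_raw_check_syncookie_ipv4(), skips IP options, and keeps only the bounds and protocol checks the verifier needs:

	#include <linux/bpf.h>
	#include <linux/if_ether.h>
	#include <linux/in.h>
	#include <linux/ip.h>
	#include <linux/tcp.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_endian.h>

	SEC("xdp")
	int check_ack_cookie(struct xdp_md *ctx)
	{
		void *data = (void *)(long)ctx->data;
		void *data_end = (void *)(long)ctx->data_end;
		struct ethhdr *eth = data;
		struct iphdr *iph = data + sizeof(*eth);
		struct tcphdr *th = (void *)(iph + 1);

		if ((void *)(th + 1) > data_end)
			return XDP_PASS;
		if (eth->h_proto != bpf_htons(ETH_P_IP) ||
		    iph->ihl != 5 || iph->protocol != IPPROTO_TCP)
			return XDP_PASS;
		if (!th->ack || th->syn)
			return XDP_PASS;

		/* 0 when ack_seq - 1 is a valid cookie for this flow, < 0 otherwise */
		if (bpf_tcp_raw_check_syncookie_ipv4(iph, th) == 0)
			return XDP_PASS;

		return XDP_DROP;
	}

	char _license[] SEC("license") = "GPL";	/* helpers are gpl_only */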
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 929f6379a279..0d3075d3c8fb 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -125,6 +125,13 @@ void flow_rule_match_ports(const struct flow_rule *rule,
}
EXPORT_SYMBOL(flow_rule_match_ports);
+void flow_rule_match_ports_range(const struct flow_rule *rule,
+ struct flow_match_ports_range *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ports_range);
+
void flow_rule_match_tcp(const struct flow_rule *rule,
struct flow_match_tcp *out)
{
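flow_rule_match_ports_range() gives classifier offload drivers access to FLOW_DISSECTOR_KEY_PORTS_RANGE in the same way as the existing flow_rule_match_*() helpers. A hedged sketch of the consuming side, not from this patch; the tp_min/tp_max field names of struct flow_dissector_key_ports_range are assumed from flow_dissector.h:

/* Editor's sketch, not from this patch: driver-side use of the new
 * PORTS_RANGE match; tp_min/tp_max field names are assumptions.
 */
#include <linux/printk.h>
#include <net/flow_offload.h>

static int sketch_parse_port_range(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports_range match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE))
		return 0;

	flow_rule_match_ports_range(rule, &match);

	/* a driver would program its hardware with the inclusive range here */
	pr_debug("dst port range %u-%u\n",
		 ntohs(match.key->tp_min.dst), ntohs(match.key->tp_max.dst));
	return 0;
}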
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index a244d3bade7d..aa6cb1f90966 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -110,7 +110,7 @@ static void linkwatch_add_event(struct net_device *dev)
spin_lock_irqsave(&lweventlist_lock, flags);
if (list_empty(&dev->link_watch_list)) {
list_add_tail(&dev->link_watch_list, &lweventlist);
- dev_hold_track(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
}
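This and the many similar hunks below are a mechanical rename from dev_hold_track()/dev_put_track() to netdev_hold()/netdev_put(); the semantics, a device reference plus a netdevice_tracker cookie for refcount-leak attribution, are unchanged. An illustrative sketch of the pattern callers follow (not from this patch):

/* Editor's sketch, not from this patch: the tracked-reference pattern the
 * netdev_hold()/netdev_put() callers below use.
 */
#include <linux/netdevice.h>

struct sketch_binding {
	struct net_device *dev;
	netdevice_tracker dev_tracker;	/* cookie for CONFIG_NET_DEV_REFCNT_TRACKER */
};

static void sketch_bind(struct sketch_binding *b, struct net_device *dev)
{
	b->dev = dev;
	netdev_hold(dev, &b->dev_tracker, GFP_KERNEL);
}

static void sketch_unbind(struct sketch_binding *b)
{
	netdev_put(b->dev, &b->dev_tracker);	/* tolerates a NULL dev */
	b->dev = NULL;
}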
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 54625287ee5b..6a8c2596ebab 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -624,7 +624,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
memcpy(n->primary_key, pkey, key_len);
n->dev = dev;
- dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);
/* Protocol specific setup. */
if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
@@ -770,10 +770,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
- dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);
if (tbl->pconstructor && tbl->pconstructor(n)) {
- dev_put_track(dev, &n->dev_tracker);
+ netdev_put(dev, &n->dev_tracker);
kfree(n);
n = NULL;
goto out;
@@ -805,7 +805,7 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
write_unlock_bh(&tbl->lock);
if (tbl->pdestructor)
tbl->pdestructor(n);
- dev_put_track(n->dev, &n->dev_tracker);
+ netdev_put(n->dev, &n->dev_tracker);
kfree(n);
return 0;
}
@@ -838,7 +838,7 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
n->next = NULL;
if (tbl->pdestructor)
tbl->pdestructor(n);
- dev_put_track(n->dev, &n->dev_tracker);
+ netdev_put(n->dev, &n->dev_tracker);
kfree(n);
}
return -ENOENT;
@@ -879,7 +879,7 @@ void neigh_destroy(struct neighbour *neigh)
if (dev->netdev_ops->ndo_neigh_destroy)
dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
- dev_put_track(dev, &neigh->dev_tracker);
+ netdev_put(dev, &neigh->dev_tracker);
neigh_parms_put(neigh->parms);
neigh_dbg(2, "neigh %p is destroyed\n", neigh);
@@ -1579,7 +1579,7 @@ static void neigh_managed_work(struct work_struct *work)
list_for_each_entry(neigh, &tbl->managed_list, managed_list)
neigh_event_send_probe(neigh, NULL, false);
queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
- max(NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME), HZ));
+ NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
write_unlock_bh(&tbl->lock);
}
@@ -1671,13 +1671,13 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
refcount_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
- dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
p->dev = dev;
write_pnet(&p->net, net);
p->sysctl_table = NULL;
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
- dev_put_track(dev, &p->dev_tracker);
+ netdev_put(dev, &p->dev_tracker);
kfree(p);
return NULL;
}
@@ -1708,7 +1708,7 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
list_del(&parms->list);
parms->dead = 1;
write_unlock_bh(&tbl->lock);
- dev_put_track(parms->dev, &parms->dev_tracker);
+ netdev_put(parms->dev, &parms->dev_tracker);
call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
@@ -2100,7 +2100,9 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
nla_put_msecs(skb, NDTPA_PROXY_DELAY,
NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
nla_put_msecs(skb, NDTPA_LOCKTIME,
- NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
+ NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
+ nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
+ NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
goto nla_put_failure;
return nla_nest_end(skb, nest);
@@ -2255,6 +2257,7 @@ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
[NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
[NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
[NDTPA_LOCKTIME] = { .type = NLA_U64 },
+ [NDTPA_INTERVAL_PROBE_TIME_MS] = { .type = NLA_U64, .min = 1 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2373,6 +2376,10 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
nla_get_msecs(tbp[i]));
call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
break;
+ case NDTPA_INTERVAL_PROBE_TIME_MS:
+ NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
+ nla_get_msecs(tbp[i]));
+ break;
case NDTPA_RETRANS_TIME:
NEIGH_VAR_SET(p, RETRANS_TIME,
nla_get_msecs(tbp[i]));
@@ -3562,6 +3569,22 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
return ret;
}
+static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table tmp = *ctl;
+ int ret;
+
+ int min = msecs_to_jiffies(1);
+
+ tmp.extra1 = &min;
+ tmp.extra2 = NULL;
+
+ ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+
int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -3658,6 +3681,9 @@ static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
+#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
+
#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
@@ -3676,6 +3702,8 @@ static struct neigh_sysctl_table {
NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
+ NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
+ "interval_probe_time_ms"),
NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a3642569fe53..d61afd21aab5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1017,7 +1017,7 @@ static void rx_queue_release(struct kobject *kobj)
#endif
memset(kobj, 0, sizeof(*kobj));
- dev_put_track(queue->dev, &queue->dev_tracker);
+ netdev_put(queue->dev, &queue->dev_tracker);
}
static const void *rx_queue_namespace(struct kobject *kobj)
@@ -1057,7 +1057,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
/* Kobject_put later will trigger rx_queue_release call which
* decreases dev refcount: Take that reference here
*/
- dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);
+ netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
@@ -1620,7 +1620,7 @@ static void netdev_queue_release(struct kobject *kobj)
struct netdev_queue *queue = to_netdev_queue(kobj);
memset(kobj, 0, sizeof(*kobj));
- dev_put_track(queue->dev, &queue->dev_tracker);
+ netdev_put(queue->dev, &queue->dev_tracker);
}
static const void *netdev_queue_namespace(struct kobject *kobj)
@@ -1660,7 +1660,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
/* Kobject_put later will trigger netdev_queue_release call
* which decreases dev refcount: Take that reference here
*/
- dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);
+ netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index db724463e7cd..5d27067b72d5 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -853,7 +853,7 @@ void netpoll_cleanup(struct netpoll *np)
if (!np->dev)
goto out;
__netpoll_cleanup(np);
- dev_put_track(np->dev, &np->dev_tracker);
+ netdev_put(np->dev, &np->dev_tracker);
np->dev = NULL;
out:
rtnl_unlock();
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f18e6e771993..b74905fcc3a1 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -389,7 +389,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
- nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
+ nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
+ pool->alloc.cache);
if (unlikely(!nr_pages))
return NULL;
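The slow-path bulk refill now allocates on pool->p.nid rather than on whatever node the calling CPU happens to run on. The nid comes from the page_pool_params supplied at creation time; an illustrative sketch, not from this patch, with placeholder parameter values:

/* Editor's sketch, not from this patch: where the nid used by the bulk
 * refill above comes from; values are placeholders.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

static struct page_pool *sketch_create_pool(struct device *dma_dev)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,
		.order		= 0,
		.pool_size	= 256,
		.nid		= dev_to_node(dma_dev),	/* node for the bulk refill */
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);
}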
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 84b62cd7bc57..88906ba6d9a7 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2100,7 +2100,7 @@ static int pktgen_setup_dev(const struct pktgen_net *pn,
/* Clean old setups */
if (pkt_dev->odev) {
- dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker);
+ netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
pkt_dev->odev = NULL;
}
@@ -3807,7 +3807,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
return add_dev_to_thread(t, pkt_dev);
out2:
- dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker);
+ netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
out1:
#ifdef CONFIG_XFRM
free_SAs(pkt_dev);
@@ -3901,7 +3901,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
/* Dis-associate from the interface */
if (pkt_dev->odev) {
- dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker);
+ netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
pkt_dev->odev = NULL;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5b3559cb1d82..974bbbbe7138 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -91,6 +91,9 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
+/* The array 'drop_reasons' is auto-generated in dropreason_str.c */
+EXPORT_SYMBOL(drop_reasons);
+
/**
* skb_panic - private function for out-of-line support
* @skb: buffer
@@ -172,13 +175,14 @@ static struct sk_buff *napi_skb_cache_get(void)
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
- if (unlikely(!nc->skb_count))
+ if (unlikely(!nc->skb_count)) {
nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
- if (unlikely(!nc->skb_count))
- return NULL;
+ if (unlikely(!nc->skb_count))
+ return NULL;
+ }
skb = nc->skb_cache[--nc->skb_count];
kasan_unpoison_object_data(skbuff_head_cache, skb);
@@ -450,8 +454,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb->fclone = SKB_FCLONE_ORIG;
refcount_set(&fclones->fclone_ref, 1);
-
- fclones->skb2.fclone = SKB_FCLONE_CLONE;
}
return skb;
@@ -557,6 +559,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
struct sk_buff *skb;
void *data;
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
len += NET_SKB_PAD + NET_IP_ALIGN;
/* If requested length is either too small or too big,
@@ -666,11 +669,18 @@ static void skb_release_data(struct sk_buff *skb)
&shinfo->dataref))
goto exit;
- skb_zcopy_clear(skb, true);
+ if (skb_zcopy(skb)) {
+ bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;
+
+ skb_zcopy_clear(skb, true);
+ if (skip_unref)
+ goto free_head;
+ }
for (i = 0; i < shinfo->nr_frags; i++)
__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
+free_head:
if (shinfo->frag_list)
kfree_skb_list(shinfo->frag_list);
@@ -725,7 +735,7 @@ void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
if (skb->destructor) {
- WARN_ON(in_hardirq());
+ DEBUG_NET_WARN_ON_ONCE(in_hardirq());
skb->destructor(skb);
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
@@ -895,7 +905,10 @@ EXPORT_SYMBOL(skb_dump);
*/
void skb_tx_error(struct sk_buff *skb)
{
- skb_zcopy_clear(skb, true);
+ if (skb) {
+ skb_zcopy_downgrade_managed(skb);
+ skb_zcopy_clear(skb, true);
+ }
}
EXPORT_SYMBOL(skb_tx_error);
@@ -978,7 +991,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
return;
}
- lockdep_assert_in_softirq();
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
if (!skb_unref(skb))
return;
@@ -1193,7 +1206,7 @@ static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
uarg->len = 1;
uarg->bytelen = size;
uarg->zerocopy = 1;
- uarg->flags = SKBFL_ZEROCOPY_FRAG;
+ uarg->flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
refcount_set(&uarg->refcnt, 1);
sock_hold(sk);
@@ -1212,6 +1225,10 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
const u32 byte_limit = 1 << 19; /* limit to a few TSO */
u32 bytelen, next;
+ /* there might be non MSG_ZEROCOPY users */
+ if (uarg->callback != msg_zerocopy_callback)
+ return NULL;
+
/* realloc only when socket is locked (TCP, UDP cork),
* so uarg->len and sk_zckey access is serialized
*/
@@ -1354,7 +1371,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
if (orig_uarg && uarg != orig_uarg)
return -EEXIST;
- err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
+ err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
struct sock *save_sk = skb->sk;
@@ -1371,6 +1388,16 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
+void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
+{
+ int i;
+
+ skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_frag_ref(skb, i);
+}
+EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);
+
static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
gfp_t gfp_mask)
{
@@ -1508,6 +1535,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
refcount_read(&fclones->fclone_ref) == 1) {
n = &fclones->skb2;
refcount_set(&fclones->fclone_ref, 2);
+ n->fclone = SKB_FCLONE_CLONE;
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
@@ -1688,6 +1716,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
BUG_ON(skb_shared(skb));
+ skb_zcopy_downgrade_managed(skb);
+
size = SKB_DATA_ALIGN(size);
if (skb_pfmemalloc(skb))
@@ -3190,9 +3220,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
}
}
- to->truesize += len + plen;
- to->len += len + plen;
- to->data_len += len + plen;
+ skb_len_add(to, len + plen);
if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
skb_tx_error(from);
@@ -3484,6 +3512,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
int pos = skb_headlen(skb);
const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
+ skb_zcopy_downgrade_managed(skb);
+
skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
skb_zerocopy_clone(skb1, skb, 0);
if (len < pos) /* Split line is inside header. */
@@ -3629,13 +3659,8 @@ onlymerged:
tgt->ip_summed = CHECKSUM_PARTIAL;
skb->ip_summed = CHECKSUM_PARTIAL;
- /* Yak, is it really working this way? Some helper please? */
- skb->len -= shiftlen;
- skb->data_len -= shiftlen;
- skb->truesize -= shiftlen;
- tgt->len += shiftlen;
- tgt->data_len += shiftlen;
- tgt->truesize += shiftlen;
+ skb_len_add(skb, -shiftlen);
+ skb_len_add(tgt, shiftlen);
return shiftlen;
}
@@ -3837,6 +3862,7 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
if (skb_can_coalesce(skb, i, page, offset)) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
} else if (i < MAX_SKB_FRAGS) {
+ skb_zcopy_downgrade_managed(skb);
get_page(page);
skb_fill_page_desc(skb, i, page, offset, size);
} else {
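Several hunks above replace the open-coded len/data_len/truesize updates with skb_len_add(). For reference, the helper (defined outside this diff, presumably in include/linux/skbuff.h) is equivalent to the triple update it replaces:

/* Editor's note, not from this patch: what skb_len_add() consolidates. */
#include <linux/skbuff.h>

static inline void skb_len_add_equivalent(struct sk_buff *skb, int delta)
{
	skb->len += delta;
	skb->data_len += delta;
	skb->truesize += delta;
}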
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index b0fcd0200e84..266d3b7b7d0b 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -497,23 +497,27 @@ bool sk_msg_is_readable(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);
-static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
- struct sk_buff *skb)
+static struct sk_msg *alloc_sk_msg(void)
{
struct sk_msg *msg;
- if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+ msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
+ if (unlikely(!msg))
return NULL;
+ sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
+ return msg;
+}
- if (!sk_rmem_schedule(sk, skb, skb->truesize))
+static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
+ struct sk_buff *skb)
+{
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
return NULL;
- msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
- if (unlikely(!msg))
+ if (!sk_rmem_schedule(sk, skb, skb->truesize))
return NULL;
- sk_msg_init(msg);
- return msg;
+ return alloc_sk_msg();
}
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
@@ -590,13 +594,12 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len)
{
- struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
+ struct sk_msg *msg = alloc_sk_msg();
struct sock *sk = psock->sk;
int err;
if (unlikely(!msg))
return -EAGAIN;
- sk_msg_init(msg);
skb_set_owner_r(skb, sk);
err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
if (err < 0)
@@ -720,6 +723,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
psock->eval = __SK_NONE;
psock->sk_proto = prot;
psock->saved_unhash = prot->unhash;
+ psock->saved_destroy = prot->destroy;
psock->saved_close = prot->close;
psock->saved_write_space = sk->sk_write_space;
@@ -1164,21 +1168,14 @@ static void sk_psock_done_strp(struct sk_psock *psock)
}
#endif /* CONFIG_BPF_STREAM_PARSER */
-static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
- unsigned int offset, size_t orig_len)
+static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
- struct sock *sk = (struct sock *)desc->arg.data;
struct sk_psock *psock;
struct bpf_prog *prog;
int ret = __SK_DROP;
- int len = orig_len;
+ int len = skb->len;
- /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
- skb = skb_clone(skb, GFP_ATOMIC);
- if (!skb) {
- desc->error = -ENOMEM;
- return 0;
- }
+ skb_get(skb);
rcu_read_lock();
psock = sk_psock(sk);
@@ -1191,12 +1188,10 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
if (!prog)
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
- skb->sk = sk;
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
- skb->sk = NULL;
}
if (sk_psock_verdict_apply(psock, skb, ret) < 0)
len = 0;
@@ -1208,16 +1203,10 @@ out:
static void sk_psock_verdict_data_ready(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
- read_descriptor_t desc;
- if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
+ if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
return;
-
- desc.arg.data = sk;
- desc.error = 0;
- desc.count = 1;
-
- sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
+ sock->ops->read_skb(sk, sk_psock_verdict_recv);
}
void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
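The verdict path thus moves from proto_ops->read_sock() with a read_descriptor_t to the new ->read_skb(), which hands whole skbs to a plain actor. A sketch of a consumer after the conversion, not from this patch; the actor signature mirrors sk_psock_verdict_recv() above and the exact callback typedef name is assumed:

/* Editor's sketch, not from this patch: shape of a ->read_skb() consumer. */
#include <linux/net.h>
#include <net/sock.h>

static int sketch_recv_actor(struct sock *sk, struct sk_buff *skb)
{
	/* consume the skb per the protocol's read_skb() rules and return
	 * the number of bytes used (or a negative error)
	 */
	return skb->len;
}

static void sketch_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock && sock->ops && sock->ops->read_skb)
		sock->ops->read_skb(sk, sketch_recv_actor);
}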
diff --git a/net/core/sock.c b/net/core/sock.c
index 2ff40dd0a7a6..4cb957d934a2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -991,7 +991,7 @@ EXPORT_SYMBOL(sock_set_mark);
static void sock_release_reserved_memory(struct sock *sk, int bytes)
{
/* Round down bytes to multiple of pages */
- bytes &= ~(SK_MEM_QUANTUM - 1);
+ bytes = round_down(bytes, PAGE_SIZE);
WARN_ON(bytes > sk->sk_reserved_mem);
sk->sk_reserved_mem -= bytes;
@@ -1019,7 +1019,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
return -ENOMEM;
/* pre-charge to forward_alloc */
- allocated = sk_memory_allocated_add(sk, pages);
+ sk_memory_allocated_add(sk, pages);
+ allocated = sk_memory_allocated(sk);
/* If the system goes into memory pressure with this
* precharge, give up and return error.
*/
@@ -1028,9 +1029,9 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
return -ENOMEM;
}
- sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT;
+ sk->sk_forward_alloc += pages << PAGE_SHIFT;
- sk->sk_reserved_mem += pages << SK_MEM_QUANTUM_SHIFT;
+ sk->sk_reserved_mem += pages << PAGE_SHIFT;
return 0;
}
@@ -2844,7 +2845,7 @@ void __release_sock(struct sock *sk)
do {
next = skb->next;
prefetch(next);
- WARN_ON_ONCE(skb_dst_is_noref(skb));
+ DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb));
skb_mark_not_on_list(skb);
sk_backlog_rcv(sk, skb);
@@ -2869,6 +2870,7 @@ void __sk_flush_backlog(struct sock *sk)
__release_sock(sk);
spin_unlock_bh(&sk->sk_lock.slock);
}
+EXPORT_SYMBOL_GPL(__sk_flush_backlog);
/**
* sk_wait_data - wait for data to arrive at sk_receive_queue
@@ -2906,11 +2908,13 @@ EXPORT_SYMBOL(sk_wait_data);
*/
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
- struct proto *prot = sk->sk_prot;
- long allocated = sk_memory_allocated_add(sk, amt);
bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
+ struct proto *prot = sk->sk_prot;
bool charged = true;
+ long allocated;
+ sk_memory_allocated_add(sk, amt);
+ allocated = sk_memory_allocated(sk);
if (memcg_charge &&
!(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
gfp_memcg_charge())))
@@ -2987,7 +2991,6 @@ suppress_allocation:
return 0;
}
-EXPORT_SYMBOL(__sk_mem_raise_allocated);
/**
* __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
@@ -3003,10 +3006,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
int ret, amt = sk_mem_pages(size);
- sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
+ sk->sk_forward_alloc += amt << PAGE_SHIFT;
ret = __sk_mem_raise_allocated(sk, size, amt, kind);
if (!ret)
- sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
+ sk->sk_forward_alloc -= amt << PAGE_SHIFT;
return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);
@@ -3029,17 +3032,16 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
(sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
sk_leave_memory_pressure(sk);
}
-EXPORT_SYMBOL(__sk_mem_reduce_allocated);
/**
* __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
* @sk: socket
- * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
+ * @amount: number of bytes (rounded down to a PAGE_SIZE multiple)
*/
void __sk_mem_reclaim(struct sock *sk, int amount)
{
- amount >>= SK_MEM_QUANTUM_SHIFT;
- sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
+ amount >>= PAGE_SHIFT;
+ sk->sk_forward_alloc -= amount << PAGE_SHIFT;
__sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -3798,6 +3800,10 @@ int proto_register(struct proto *prot, int alloc_slab)
pr_err("%s: missing sysctl_mem\n", prot->name);
return -EINVAL;
}
+ if (prot->memory_allocated && !prot->per_cpu_fw_alloc) {
+ pr_err("%s: missing per_cpu_fw_alloc\n", prot->name);
+ return -EINVAL;
+ }
if (alloc_slab) {
prot->slab = kmem_cache_create_usercopy(prot->name,
prot->obj_size, 0,
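proto_register() now refuses protocols that expose ->memory_allocated without a matching per-CPU forward-alloc counter; the decnet hunk further below shows a real conversion. A minimal sketch of the shape a protocol definition must have to pass both registration checks (hypothetical names and limits, not from this patch):

/* Editor's sketch, not from this patch: minimal proto satisfying both the
 * sysctl_mem and per_cpu_fw_alloc checks above; names are hypothetical.
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <net/sock.h>

static atomic_long_t sketch_memory_allocated;
static DEFINE_PER_CPU(int, sketch_memory_per_cpu_fw_alloc);
static long sketch_sysctl_mem[3] = { 4096, 8192, 16384 };	/* pages */

static struct proto sketch_proto = {
	.name			= "SKETCH",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct sock),
	.sysctl_mem		= sketch_sysctl_mem,
	.memory_allocated	= &sketch_memory_allocated,
	.per_cpu_fw_alloc	= &sketch_memory_per_cpu_fw_alloc,
};
/* registered as usual with proto_register(&sketch_proto, 1) */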
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 81d4b4756a02..028813dfecb0 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1561,6 +1561,29 @@ void sock_map_unhash(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sock_map_unhash);
+void sock_map_destroy(struct sock *sk)
+{
+ void (*saved_destroy)(struct sock *sk);
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ if (sk->sk_prot->destroy)
+ sk->sk_prot->destroy(sk);
+ return;
+ }
+
+ saved_destroy = psock->saved_destroy;
+ sock_map_remove_links(sk, psock);
+ rcu_read_unlock();
+ sk_psock_stop(psock, false);
+ sk_psock_put(sk, psock);
+ saved_destroy(sk);
+}
+EXPORT_SYMBOL_GPL(sock_map_destroy);
+
void sock_map_close(struct sock *sk, long timeout)
{
void (*saved_close)(struct sock *sk, long timeout);
diff --git a/net/core/stream.c b/net/core/stream.c
index 06b36c730ce8..ccc083cdef23 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -196,13 +196,13 @@ void sk_stream_kill_queues(struct sock *sk)
__skb_queue_purge(&sk->sk_receive_queue);
/* Next, the write queue. */
- WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
+ WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));
/* Account for returned memory. */
sk_mem_reclaim_final(sk);
- WARN_ON(sk->sk_wmem_queued);
- WARN_ON(sk->sk_forward_alloc);
+ WARN_ON_ONCE(sk->sk_wmem_queued);
+ WARN_ON_ONCE(sk->sk_forward_alloc);
/* It is _impossible_ for the backlog to contain anything
* when we get here. All user references to this socket
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index dc92a67baea3..aa4f43f52499 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -149,6 +149,7 @@ static DEFINE_RWLOCK(dn_hash_lock);
static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
static struct hlist_head dn_wild_sk;
static atomic_long_t decnet_memory_allocated;
+static DEFINE_PER_CPU(int, decnet_memory_per_cpu_fw_alloc);
static int __dn_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen, int flags);
@@ -454,7 +455,10 @@ static struct proto dn_proto = {
.owner = THIS_MODULE,
.enter_memory_pressure = dn_enter_memory_pressure,
.memory_pressure = &dn_memory_pressure,
+
.memory_allocated = &decnet_memory_allocated,
+ .per_cpu_fw_alloc = &decnet_memory_per_cpu_fw_alloc,
+
.sysctl_mem = sysctl_decnet_mem,
.sysctl_wmem = sysctl_decnet_wmem,
.sysctl_rmem = sysctl_decnet_rmem,
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index fbd98ac853ea..7c569bcc0aca 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -94,6 +94,7 @@ struct neigh_table dn_neigh_table = {
[NEIGH_VAR_RETRANS_TIME] = 1 * HZ,
[NEIGH_VAR_BASE_REACHABLE_TIME] = 30 * HZ,
[NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
+ [NEIGH_VAR_INTERVAL_PROBE_TIME_MS] = 5 * HZ,
[NEIGH_VAR_GC_STALETIME] = 60 * HZ,
[NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX,
[NEIGH_VAR_PROXY_QLEN] = 0,
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 8cb87b5067ee..3eef72ce99a4 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -87,10 +87,10 @@ config NET_DSA_TAG_MTK
Mediatek switches.
config NET_DSA_TAG_KSZ
- tristate "Tag driver for Microchip 8795/9477/9893 families of switches"
+ tristate "Tag driver for Microchip 8795/937x/9477/9893 families of switches"
help
Say Y if you want to enable support for tagging frames for the
- Microchip 8795/9477/9893 families of switches.
+ Microchip 8795/937x/9477/9893 families of switches.
config NET_DSA_TAG_OCELOT
tristate "Tag driver for Ocelot family of switches, using NPI port"
@@ -132,6 +132,13 @@ config NET_DSA_TAG_RTL8_4
Say Y or M if you want to enable support for tagging frames for Realtek
switches with 8 byte protocol 4 tags, such as the Realtek RTL8365MB-VC.
+config NET_DSA_TAG_RZN1_A5PSW
+ tristate "Tag driver for Renesas RZ/N1 A5PSW switch"
+ help
+ Say Y or M if you want to enable support for tagging frames for
+ Renesas RZ/N1 embedded switch that uses an 8 byte tag inserted after
+ the source MAC address.
+
config NET_DSA_TAG_LAN9303
tristate "Tag driver for SMSC/Microchip LAN9303 family of switches"
help
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 9f75820e7c98..af28c24ead18 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o
obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
+obj-$(CONFIG_NET_DSA_TAG_RZN1_A5PSW) += tag_rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 801a5d445833..ad6a6663feeb 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -935,10 +935,10 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
s = per_cpu_ptr(dev->tstats, i);
do {
start = u64_stats_fetch_begin_irq(&s->syncp);
- tx_packets = s->tx_packets;
- tx_bytes = s->tx_bytes;
- rx_packets = s->rx_packets;
- rx_bytes = s->rx_bytes;
+ tx_packets = u64_stats_read(&s->tx_packets);
+ tx_bytes = u64_stats_read(&s->tx_bytes);
+ rx_packets = u64_stats_read(&s->rx_packets);
+ rx_bytes = u64_stats_read(&s->rx_bytes);
} while (u64_stats_fetch_retry_irq(&s->syncp, start));
data[0] += tx_packets;
data[1] += tx_bytes;
@@ -1002,6 +1002,18 @@ dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}
+static void
+dsa_slave_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_rmon_stats)
+ ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
+}
+
static void dsa_slave_net_selftest(struct net_device *ndev,
struct ethtool_test *etest, u64 *buf)
{
@@ -1097,6 +1109,16 @@ static int dsa_slave_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(dp->pl, cmd);
}
+static void dsa_slave_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_pause_stats)
+ ds->ops->get_pause_stats(ds, dp->index, pause_stats);
+}
+
static void dsa_slave_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
@@ -2081,12 +2103,14 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_eth_phy_stats = dsa_slave_get_eth_phy_stats,
.get_eth_mac_stats = dsa_slave_get_eth_mac_stats,
.get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats,
+ .get_rmon_stats = dsa_slave_get_rmon_stats,
.set_wol = dsa_slave_set_wol,
.get_wol = dsa_slave_get_wol,
.set_eee = dsa_slave_set_eee,
.get_eee = dsa_slave_get_eee,
.get_link_ksettings = dsa_slave_get_link_ksettings,
.set_link_ksettings = dsa_slave_set_link_ksettings,
+ .get_pause_stats = dsa_slave_get_pause_stats,
.get_pauseparam = dsa_slave_get_pauseparam,
.set_pauseparam = dsa_slave_set_pauseparam,
.get_rxnfc = dsa_slave_get_rxnfc,
@@ -2460,8 +2484,9 @@ static int dsa_slave_changeupper(struct net_device *dev,
if (!err)
dsa_bridge_mtu_normalization(dp);
if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(extack,
- "Offloading not supported");
+ if (!extack->_msg)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 3509fc967ca9..38fa19c1e2d5 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -193,10 +193,69 @@ static const struct dsa_device_ops ksz9893_netdev_ops = {
DSA_TAG_DRIVER(ksz9893_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893);
+/* For xmit, 2 bytes are added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : represents tag override, lookup and valid
+ * tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x80=port8)
+ *
+ * For rcv, 1 byte is added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : zero-based value represents port
+ * (eg, 0x00=port1, 0x02=port3, 0x07=port8)
+ */
+#define LAN937X_EGRESS_TAG_LEN 2
+
+#define LAN937X_TAIL_TAG_BLOCKING_OVERRIDE BIT(11)
+#define LAN937X_TAIL_TAG_LOOKUP BIT(12)
+#define LAN937X_TAIL_TAG_VALID BIT(13)
+#define LAN937X_TAIL_TAG_PORT_MASK 7
+
+static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ const struct ethhdr *hdr = eth_hdr(skb);
+ __be16 *tag;
+ u16 val;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+ return NULL;
+
+ tag = skb_put(skb, LAN937X_EGRESS_TAG_LEN);
+
+ val = BIT(dp->index);
+
+ if (is_link_local_ether_addr(hdr->h_dest))
+ val |= LAN937X_TAIL_TAG_BLOCKING_OVERRIDE;
+
+ /* Tail tag valid bit - This bit should always be set by the CPU */
+ val |= LAN937X_TAIL_TAG_VALID;
+
+ put_unaligned_be16(val, tag);
+
+ return skb;
+}
+
+static const struct dsa_device_ops lan937x_netdev_ops = {
+ .name = "lan937x",
+ .proto = DSA_TAG_PROTO_LAN937X,
+ .xmit = lan937x_xmit,
+ .rcv = ksz9477_rcv,
+ .needed_tailroom = LAN937X_EGRESS_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(lan937x_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_LAN937X);
+
static struct dsa_tag_driver *dsa_tag_driver_array[] = {
&DSA_TAG_DRIVER_NAME(ksz8795_netdev_ops),
&DSA_TAG_DRIVER_NAME(ksz9477_netdev_ops),
&DSA_TAG_DRIVER_NAME(ksz9893_netdev_ops),
+ &DSA_TAG_DRIVER_NAME(lan937x_netdev_ops),
};
module_dsa_tag_drivers(dsa_tag_driver_array);
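Worked example (editor's note, derived from the code above, not part of the patch) of the tail tag built in lan937x_xmit() for a frame leaving user port 3 (dp->index == 2) that is not link-local:

	val = BIT(2) | LAN937X_TAIL_TAG_VALID
	    = 0x0004 | 0x2000
	    = 0x2004

put_unaligned_be16() stores the value big-endian, so the two bytes added before the FCS are 0x20 0x04: tag0 = 0x20 carries the valid bit (no override, no lookup) and tag1 = 0x04 is the port bitmap selecting port 3, matching the format comment at the top of the hunk.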
diff --git a/net/dsa/tag_rzn1_a5psw.c b/net/dsa/tag_rzn1_a5psw.c
new file mode 100644
index 000000000000..e2a5ee6ae688
--- /dev/null
+++ b/net/dsa/tag_rzn1_a5psw.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <net/dsa.h>
+
+#include "dsa_priv.h"
+
+/* To define the outgoing port and to discover the incoming port a TAG is
+ * inserted after Src MAC :
+ *
+ * Dest MAC Src MAC TAG Type
+ * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 5 6 7 8 | 1 2 |...
+ * |<--------------->|
+ *
+ * See struct a5psw_tag for layout
+ */
+
+#define ETH_P_DSA_A5PSW 0xE001
+#define A5PSW_TAG_LEN 8
+#define A5PSW_CTRL_DATA_FORCE_FORWARD BIT(0)
+/* This is used both for the xmit tag and for rcv tagging */
+#define A5PSW_CTRL_DATA_PORT GENMASK(3, 0)
+
+struct a5psw_tag {
+ __be16 ctrl_tag;
+ __be16 ctrl_data;
+ __be16 ctrl_data2_hi;
+ __be16 ctrl_data2_lo;
+};
+
+static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct a5psw_tag *ptag;
+ u32 data2_val;
+
+ BUILD_BUG_ON(sizeof(*ptag) != A5PSW_TAG_LEN);
+
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 60 bytes otherwise they will be discarded when they enter the
+ * switch port logic.
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN, false))
+ return NULL;
+
+ /* provide 'A5PSW_TAG_LEN' bytes additional space */
+ skb_push(skb, A5PSW_TAG_LEN);
+
+ /* make room between MACs and Ether-Type to insert tag */
+ dsa_alloc_etype_header(skb, A5PSW_TAG_LEN);
+
+ ptag = dsa_etype_header_pos_tx(skb);
+
+ data2_val = FIELD_PREP(A5PSW_CTRL_DATA_PORT, BIT(dp->index));
+ ptag->ctrl_tag = htons(ETH_P_DSA_A5PSW);
+ ptag->ctrl_data = htons(A5PSW_CTRL_DATA_FORCE_FORWARD);
+ ptag->ctrl_data2_lo = htons(data2_val);
+ ptag->ctrl_data2_hi = 0;
+
+ return skb;
+}
+
+static struct sk_buff *a5psw_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct a5psw_tag *tag;
+ int port;
+
+ if (unlikely(!pskb_may_pull(skb, A5PSW_TAG_LEN))) {
+ dev_warn_ratelimited(&dev->dev,
+ "Dropping packet, cannot pull\n");
+ return NULL;
+ }
+
+ tag = dsa_etype_header_pos_rx(skb);
+
+ if (tag->ctrl_tag != htons(ETH_P_DSA_A5PSW)) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid TAG marker\n");
+ return NULL;
+ }
+
+ port = FIELD_GET(A5PSW_CTRL_DATA_PORT, ntohs(tag->ctrl_data));
+
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ skb_pull_rcsum(skb, A5PSW_TAG_LEN);
+ dsa_strip_etype_header(skb, A5PSW_TAG_LEN);
+
+ dsa_default_offload_fwd_mark(skb);
+
+ return skb;
+}
+
+static const struct dsa_device_ops a5psw_netdev_ops = {
+ .name = "a5psw",
+ .proto = DSA_TAG_PROTO_RZN1_A5PSW,
+ .xmit = a5psw_tag_xmit,
+ .rcv = a5psw_tag_rcv,
+ .needed_headroom = A5PSW_TAG_LEN,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RZN1_A5PSW);
+module_dsa_tag_driver(a5psw_netdev_ops);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 326e14ee05db..6a7308de192d 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -369,22 +369,9 @@ EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode);
bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
const unsigned long *src)
{
- bool retval = true;
-
- /* TODO: following test will soon always be true */
- if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(ext);
-
- linkmode_zero(ext);
- bitmap_fill(ext, 32);
- bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (linkmode_intersects(ext, src)) {
- /* src mask goes beyond bit 31 */
- retval = false;
- }
- }
*legacy_u32 = src[0];
- return retval;
+ return find_next_bit(src, __ETHTOOL_LINK_MODE_MASK_NBITS, 32) ==
+ __ETHTOOL_LINK_MODE_MASK_NBITS;
}
EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
@@ -2010,7 +1997,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
* removal of the device.
*/
busy = true;
- dev_hold_track(dev, &dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &dev_tracker, GFP_KERNEL);
rtnl_unlock();
if (rc == 0) {
@@ -2034,7 +2021,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
}
rtnl_lock();
- dev_put_track(dev, &dev_tracker);
+ netdev_put(dev, &dev_tracker);
busy = false;
(void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 5fe8f4ae2ceb..e26079e11835 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -402,7 +402,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
ops->cleanup_data(reply_data);
genlmsg_end(rskb, reply_payload);
- dev_put_track(req_info->dev, &req_info->dev_tracker);
+ netdev_put(req_info->dev, &req_info->dev_tracker);
kfree(reply_data);
kfree(req_info);
return genlmsg_reply(rskb, info);
@@ -414,7 +414,7 @@ err_cleanup:
if (ops->cleanup_data)
ops->cleanup_data(reply_data);
err_dev:
- dev_put_track(req_info->dev, &req_info->dev_tracker);
+ netdev_put(req_info->dev, &req_info->dev_tracker);
kfree(reply_data);
kfree(req_info);
return ret;
@@ -550,7 +550,7 @@ static int ethnl_default_start(struct netlink_callback *cb)
* same parser as for non-dump (doit) requests is used, it
* would take reference to the device if it finds one
*/
- dev_put_track(req_info->dev, &req_info->dev_tracker);
+ netdev_put(req_info->dev, &req_info->dev_tracker);
req_info->dev = NULL;
}
if (ret < 0)
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 7919ddb2371c..c0d587611854 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -237,7 +237,7 @@ struct ethnl_req_info {
static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
{
- dev_put_track(req_info->dev, &req_info->dev_tracker);
+ netdev_put(req_info->dev, &req_info->dev_tracker);
}
/**
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 252c8bceaba4..3ca0cc467886 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
return;
}
- WARN_ON(atomic_read(&sk->sk_rmem_alloc));
- WARN_ON(refcount_read(&sk->sk_wmem_alloc));
- WARN_ON(sk->sk_wmem_queued);
- WARN_ON(sk_forward_alloc_get(sk));
+ WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
+ WARN_ON_ONCE(sk->sk_wmem_queued);
+ WARN_ON_ONCE(sk_forward_alloc_get(sk));
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
@@ -1040,6 +1040,7 @@ const struct proto_ops inet_stream_ops = {
.sendpage = inet_sendpage,
.splice_read = tcp_splice_read,
.read_sock = tcp_read_sock,
+ .read_skb = tcp_read_skb,
.sendmsg_locked = tcp_sendmsg_locked,
.sendpage_locked = tcp_sendpage_locked,
.peek_len = tcp_peek_len,
@@ -1067,7 +1068,7 @@ const struct proto_ops inet_dgram_ops = {
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
- .read_sock = udp_read_sock,
+ .read_skb = udp_read_skb,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
@@ -1919,6 +1920,8 @@ static int __init inet_init(void)
sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
+ raw_hashinfo_init(&raw_v4_hashinfo);
+
rc = proto_register(&tcp_prot, 1);
if (rc)
goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index ab4a5601c82a..87c7e3fc5197 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -168,6 +168,7 @@ struct neigh_table arp_tbl = {
[NEIGH_VAR_RETRANS_TIME] = 1 * HZ,
[NEIGH_VAR_BASE_REACHABLE_TIME] = 30 * HZ,
[NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
+ [NEIGH_VAR_INTERVAL_PROBE_TIME_MS] = 5 * HZ,
[NEIGH_VAR_GC_STALETIME] = 60 * HZ,
[NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX,
[NEIGH_VAR_PROXY_QLEN] = 64,
@@ -428,6 +429,26 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
return !inet_confirm_addr(net, in_dev, sip, tip, scope);
}
+static int arp_accept(struct in_device *in_dev, __be32 sip)
+{
+ struct net *net = dev_net(in_dev->dev);
+ int scope = RT_SCOPE_LINK;
+
+ switch (IN_DEV_ARP_ACCEPT(in_dev)) {
+ case 0: /* Don't create new entries from garp */
+ return 0;
+ case 1: /* Create new entries from garp */
+ return 1;
+ case 2: /* Create a neighbor in the arp table only if sip
+ * is in the same subnet as an address configured
+ * on the interface that received the garp message
+ */
+ return !!inet_confirm_addr(net, in_dev, sip, 0, scope);
+ default:
+ return 0;
+ }
+}
+
static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
{
struct rtable *rt;
@@ -867,12 +888,12 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
addr_type = -1;
- if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
+ if (n || arp_accept(in_dev, sip)) {
is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
sip, tip, sha, tha);
}
- if (IN_DEV_ARP_ACCEPT(in_dev)) {
+ if (arp_accept(in_dev, sip)) {
/* Unsolicited ARP is not accepted by default.
It is possible, that this option should be enabled for some
devices (strip is candidate)
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index f79ab942f03b..7a181631b995 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -14,18 +14,6 @@
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;
-static u32 optional_ops[] = {
- offsetof(struct tcp_congestion_ops, init),
- offsetof(struct tcp_congestion_ops, release),
- offsetof(struct tcp_congestion_ops, set_state),
- offsetof(struct tcp_congestion_ops, cwnd_event),
- offsetof(struct tcp_congestion_ops, in_ack_event),
- offsetof(struct tcp_congestion_ops, pkts_acked),
- offsetof(struct tcp_congestion_ops, min_tso_segs),
- offsetof(struct tcp_congestion_ops, sndbuf_expand),
- offsetof(struct tcp_congestion_ops, cong_control),
-};
-
static u32 unsupported_ops[] = {
offsetof(struct tcp_congestion_ops, get_info),
};
@@ -51,18 +39,6 @@ static int bpf_tcp_ca_init(struct btf *btf)
return 0;
}
-static bool is_optional(u32 member_offset)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
- if (member_offset == optional_ops[i])
- return true;
- }
-
- return false;
-}
-
static bool is_unsupported(u32 member_offset)
{
unsigned int i;
@@ -111,6 +87,12 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
}
switch (off) {
+ case offsetof(struct sock, sk_pacing_rate):
+ end = offsetofend(struct sock, sk_pacing_rate);
+ break;
+ case offsetof(struct sock, sk_pacing_status):
+ end = offsetofend(struct sock, sk_pacing_status);
+ break;
case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
break;
@@ -240,7 +222,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
{
const struct tcp_congestion_ops *utcp_ca;
struct tcp_congestion_ops *tcp_ca;
- int prog_fd;
u32 moff;
utcp_ca = (const struct tcp_congestion_ops *)udata;
@@ -262,14 +243,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
return 1;
}
- if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
- return 0;
-
- /* Ensure bpf_prog is provided for compulsory func ptr */
- prog_fd = (int)(*(unsigned long *)(udata + moff));
- if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
- return -EINVAL;
-
return 0;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index b2366ad540e6..92b778e423df 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -244,7 +244,7 @@ void in_dev_finish_destroy(struct in_device *idev)
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
- dev_put_track(dev, &idev->dev_tracker);
+ netdev_put(dev, &idev->dev_tracker);
if (!idev->dead)
pr_err("Freeing alive in_device %p\n", idev);
else
@@ -272,7 +272,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
dev_disable_lro(dev);
/* Reference in_dev->dev */
- dev_hold_track(dev, &in_dev->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &in_dev->dev_tracker, GFP_KERNEL);
/* Account for reference dev->ip_ptr (below) */
refcount_set(&in_dev->refcnt, 1);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b694f352ce7a..5c03eba787e5 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -502,9 +502,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
nfrags++;
- skb->len += tailen;
- skb->data_len += tailen;
- skb->truesize += tailen;
+ skb_len_add(skb, tailen);
if (sk && sk_fullsock(sk))
refcount_add(tailen, &sk->sk_wmem_alloc);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index db7b2503f068..2dc97583d279 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -211,7 +211,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
void fib_nh_common_release(struct fib_nh_common *nhc)
{
- dev_put_track(nhc->nhc_dev, &nhc->nhc_dev_tracker);
+ netdev_put(nhc->nhc_dev, &nhc->nhc_dev_tracker);
lwtstate_put(nhc->nhc_lwtstate);
rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output);
rt_fibinfo_free(&nhc->nhc_rth_input);
@@ -1057,7 +1057,8 @@ static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh,
err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack);
if (!err) {
nh->fib_nh_dev = fib6_nh.fib_nh_dev;
- dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_KERNEL);
+ netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker,
+ GFP_KERNEL);
nh->fib_nh_oif = nh->fib_nh_dev->ifindex;
nh->fib_nh_scope = RT_SCOPE_LINK;
@@ -1141,7 +1142,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
if (!netif_carrier_ok(dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
nh->fib_nh_dev = dev;
- dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
nh->fib_nh_scope = RT_SCOPE_LINK;
return 0;
}
@@ -1195,7 +1196,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
"No egress device for nexthop gateway");
goto out;
}
- dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
if (!netif_carrier_ok(dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
@@ -1229,7 +1230,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
}
nh->fib_nh_dev = in_dev->dev;
- dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
+ netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
nh->fib_nh_scope = RT_SCOPE_LINK;
if (!netif_carrier_ok(nh->fib_nh_dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 00b4bf26fd93..d7bd1daf022b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -969,7 +969,6 @@ static int __ip_append_data(struct sock *sk,
struct inet_sock *inet = inet_sk(sk);
struct ubuf_info *uarg = NULL;
struct sk_buff *skb;
-
struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
@@ -977,6 +976,7 @@ static int __ip_append_data(struct sock *sk,
int copy;
int err;
int offset = 0;
+ bool zc = false;
unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
@@ -1017,17 +1017,35 @@ static int __ip_append_data(struct sock *sk,
(!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
csummode = CHECKSUM_PARTIAL;
- if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
- uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
- if (!uarg)
- return -ENOBUFS;
- extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
- if (rt->dst.dev->features & NETIF_F_SG &&
- csummode == CHECKSUM_PARTIAL) {
- paged = true;
- } else {
- uarg->zerocopy = 0;
- skb_zcopy_set(skb, uarg, &extra_uref);
+ if ((flags & MSG_ZEROCOPY) && length) {
+ struct msghdr *msg = from;
+
+ if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
+ if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
+ return -EINVAL;
+
+ /* Leave uarg NULL if can't zerocopy, callers should
+ * be able to handle it.
+ */
+ if ((rt->dst.dev->features & NETIF_F_SG) &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+ zc = true;
+ uarg = msg->msg_ubuf;
+ }
+ } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
+ uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
+ if (!uarg)
+ return -ENOBUFS;
+ extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
+ if (rt->dst.dev->features & NETIF_F_SG &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+ zc = true;
+ } else {
+ uarg->zerocopy = 0;
+ skb_zcopy_set(skb, uarg, &extra_uref);
+ }
}
}
@@ -1091,9 +1109,12 @@ alloc_new_skb:
(fraglen + alloc_extra < SKB_MAX_ALLOC ||
!(rt->dst.dev->features & NETIF_F_SG)))
alloclen = fraglen;
- else {
+ else if (!zc) {
alloclen = min_t(int, fraglen, MAX_HEADER);
pagedlen = fraglen - alloclen;
+ } else {
+ alloclen = fragheaderlen + transhdrlen;
+ pagedlen = datalen - transhdrlen;
}
alloclen += alloc_extra;
@@ -1188,13 +1209,14 @@ alloc_new_skb:
err = -EFAULT;
goto error;
}
- } else if (!uarg || !uarg->zerocopy) {
+ } else if (!zc) {
int i = skb_shinfo(skb)->nr_frags;
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
goto error;
+ skb_zcopy_downgrade_managed(skb);
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
err = -EMSGSIZE;
@@ -1214,9 +1236,7 @@ alloc_new_skb:
pfrag->offset += copy;
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
+ skb_len_add(skb, copy);
wmem_alloc_delta += copy;
} else {
err = skb_zerocopy_iter_dgram(skb, from, copy);
@@ -1443,9 +1463,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
skb->csum = csum_block_add(skb->csum, csum, skb->len);
}
- skb->len += len;
- skb->data_len += len;
- skb->truesize += len;
+ skb_len_add(skb, len);
refcount_add(len, &sk->sk_wmem_alloc);
offset += len;
size -= len;
@@ -1704,7 +1722,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
arg->uid);
security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
- rt = ip_route_output_key(net, &fl4);
+ rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt))
return;
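The __ip_append_data() changes above add a second zerocopy source: when the caller provides a pre-pinned msg->msg_ubuf (an in-kernel user such as io_uring's zerocopy send path), it is used directly; otherwise the existing SOCK_ZEROCOPY/msg_zerocopy_realloc() path is kept. The userspace-visible interface is unchanged and still looks like this sketch (not from this patch; completion notifications are read from the socket error queue, omitted here):

/* Editor's sketch, not from this patch: the long-standing userspace
 * MSG_ZEROCOPY path served by the SOCK_ZEROCOPY branch above.
 */
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

static ssize_t send_zc(int fd, const void *buf, size_t len)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		return -errno;
	return send(fd, buf, len, MSG_ZEROCOPY);
}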
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 94017a8c3994..e65e948cab9f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -242,7 +242,7 @@ static struct net_device *__ip_tunnel_create(struct net *net,
if (parms->name[0]) {
if (!dev_valid_name(parms->name))
goto failed;
- strlcpy(name, parms->name, IFNAMSIZ);
+ strscpy(name, parms->name, IFNAMSIZ);
} else {
if (strlen(ops->kind) > (IFNAMSIZ - 3))
goto failed;
@@ -641,6 +641,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *inner_iph;
unsigned int max_headroom; /* The extra header space needed */
struct rtable *rt = NULL; /* Route to the other host */
+ __be16 payload_protocol;
bool use_cache = false;
struct flowi4 fl4;
bool md = false;
@@ -651,6 +652,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
+ payload_protocol = skb_protocol(skb, true);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -670,13 +672,12 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
dst = tun_info->key.u.ipv4.dst;
md = true;
connected = true;
- }
- else if (skb->protocol == htons(ETH_P_IP)) {
+ } else if (payload_protocol == htons(ETH_P_IP)) {
rt = skb_rtable(skb);
dst = rt_nexthop(rt, inner_iph->daddr);
}
#if IS_ENABLED(CONFIG_IPV6)
- else if (skb->protocol == htons(ETH_P_IPV6)) {
+ else if (payload_protocol == htons(ETH_P_IPV6)) {
const struct in6_addr *addr6;
struct neighbour *neigh;
bool do_tx_error_icmp;
@@ -716,10 +717,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
tos = tnl_params->tos;
if (tos & 0x1) {
tos &= ~0x1;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (payload_protocol == htons(ETH_P_IP)) {
tos = inner_iph->tos;
connected = false;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (payload_protocol == htons(ETH_P_IPV6)) {
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
connected = false;
}
@@ -765,7 +766,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
df = tnl_params->frag_off;
- if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+ if (payload_protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
df |= (inner_iph->frag_off & htons(IP_DF));
if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
@@ -786,10 +787,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
ttl = tnl_params->ttl;
if (ttl == 0) {
- if (skb->protocol == htons(ETH_P_IP))
+ if (payload_protocol == htons(ETH_P_IP))
ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
else
@@ -1065,7 +1066,7 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
memset(&parms, 0, sizeof(parms));
if (devname)
- strlcpy(parms.name, devname, IFNAMSIZ);
+ strscpy(parms.name, devname, IFNAMSIZ);
rtnl_lock();
itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 9d41d5d5cd1e..f53a0f2453af 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1759,15 +1759,15 @@ static int __init ip_auto_config_setup(char *addrs)
case 4:
if ((dp = strchr(ip, '.'))) {
*dp++ = '\0';
- strlcpy(utsname()->domainname, dp,
+ strscpy(utsname()->domainname, dp,
sizeof(utsname()->domainname));
}
- strlcpy(utsname()->nodename, ip,
+ strscpy(utsname()->nodename, ip,
sizeof(utsname()->nodename));
ic_host_name_set = 1;
break;
case 5:
- strlcpy(user_dev_name, ip, sizeof(user_dev_name));
+ strscpy(user_dev_name, ip, sizeof(user_dev_name));
break;
case 6:
if (ic_proto_name(ip) == 0 &&
@@ -1814,7 +1814,7 @@ __setup("nfsaddrs=", nfsaddrs_config_setup);
static int __init vendor_class_identifier_setup(char *addrs)
{
- if (strlcpy(vendor_class_identifier, addrs,
+ if (strscpy(vendor_class_identifier, addrs,
sizeof(vendor_class_identifier))
>= sizeof(vendor_class_identifier))
pr_warn("DHCP: vendorclass too long, truncated to \"%s\"\n",
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 13e6329784fb..73651d17e51f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -77,7 +77,12 @@ struct ipmr_result {
* Note that the changes are semaphored via rtnl_lock.
*/
-static DEFINE_RWLOCK(mrt_lock);
+static DEFINE_SPINLOCK(mrt_lock);
+
+static struct net_device *vif_dev_read(const struct vif_device *vif)
+{
+ return rcu_dereference(vif->dev);
+}
/* Multicast router control variables */
@@ -100,11 +105,11 @@ static void ipmr_free_table(struct mr_table *mrt);
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
struct net_device *dev, struct sk_buff *skb,
struct mfc_cache *cache, int local);
-static int ipmr_cache_report(struct mr_table *mrt,
+static int ipmr_cache_report(const struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
int cmd);
-static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
+static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
@@ -501,11 +506,15 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
return err;
}
- read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
- ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
- read_unlock(&mrt_lock);
+ rcu_read_lock();
+
+ /* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
+ ipmr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
+ IGMPMSG_WHOLEPKT);
+
+ rcu_read_unlock();
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -572,6 +581,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
{
struct net_device *reg_dev = NULL;
struct iphdr *encap;
+ int vif_num;
encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
/* Check that:
@@ -584,11 +594,10 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
ntohs(encap->tot_len) + pimlen > skb->len)
return 1;
- read_lock(&mrt_lock);
- if (mrt->mroute_reg_vif_num >= 0)
- reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
- read_unlock(&mrt_lock);
-
+ /* Pairs with WRITE_ONCE() in vif_add()/vif_delete() */
+ vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
+ if (vif_num >= 0)
+ reg_dev = vif_dev_read(&mrt->vif_table[vif_num]);
if (!reg_dev)
return 1;
@@ -614,10 +623,11 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
static int call_ipmr_vif_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
vifi_t vif_index, u32 tb_id)
{
return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
- vif, vif_index, tb_id,
+ vif, vif_dev, vif_index, tb_id,
&net->ipv4.ipmr_seq);
}
@@ -649,22 +659,19 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
v = &mrt->vif_table[vifi];
- if (VIF_EXISTS(mrt, vifi))
- call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
- mrt->id);
-
- write_lock_bh(&mrt_lock);
- dev = v->dev;
- v->dev = NULL;
-
- if (!dev) {
- write_unlock_bh(&mrt_lock);
+ dev = rtnl_dereference(v->dev);
+ if (!dev)
return -EADDRNOTAVAIL;
- }
- if (vifi == mrt->mroute_reg_vif_num)
- mrt->mroute_reg_vif_num = -1;
+ spin_lock(&mrt_lock);
+ call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, dev,
+ vifi, mrt->id);
+ RCU_INIT_POINTER(v->dev, NULL);
+ if (vifi == mrt->mroute_reg_vif_num) {
+ /* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
+ WRITE_ONCE(mrt->mroute_reg_vif_num, -1);
+ }
if (vifi + 1 == mrt->maxvif) {
int tmp;
@@ -672,10 +679,10 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
if (VIF_EXISTS(mrt, tmp))
break;
}
- mrt->maxvif = tmp+1;
+ WRITE_ONCE(mrt->maxvif, tmp + 1);
}
- write_unlock_bh(&mrt_lock);
+ spin_unlock(&mrt_lock);
dev_set_allmulti(dev, -1);
@@ -691,7 +698,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
- dev_put_track(dev, &v->dev_tracker);
+ netdev_put(dev, &v->dev_tracker);
return 0;
}
@@ -777,7 +784,7 @@ out:
spin_unlock(&mfc_unres_lock);
}
-/* Fill oifs list. It is called under write locked mrt_lock. */
+/* Fill oifs list. It is called under locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
unsigned char *ttls)
{
@@ -889,15 +896,18 @@ static int vif_add(struct net *net, struct mr_table *mrt,
v->remote = vifc->vifc_rmt_addr.s_addr;
/* And finish update writing critical data */
- write_lock_bh(&mrt_lock);
- v->dev = dev;
+ spin_lock(&mrt_lock);
+ rcu_assign_pointer(v->dev, dev);
netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
- if (v->flags & VIFF_REGISTER)
- mrt->mroute_reg_vif_num = vifi;
+ if (v->flags & VIFF_REGISTER) {
+ /* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
+ WRITE_ONCE(mrt->mroute_reg_vif_num, vifi);
+ }
if (vifi+1 > mrt->maxvif)
- mrt->maxvif = vifi+1;
- write_unlock_bh(&mrt_lock);
- call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
+ WRITE_ONCE(mrt->maxvif, vifi + 1);
+ spin_unlock(&mrt_lock);
+ call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, dev,
+ vifi, mrt->id);
return 0;
}
@@ -1001,9 +1011,9 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
/* Bounce a cache query up to mrouted and netlink.
*
- * Called under mrt_lock.
+ * Called under rcu_read_lock().
*/
-static int ipmr_cache_report(struct mr_table *mrt,
+static int ipmr_cache_report(const struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert)
{
const int ihl = ip_hdrlen(pkt);
@@ -1038,8 +1048,11 @@ static int ipmr_cache_report(struct mr_table *mrt,
msg->im_vif = vifi;
msg->im_vif_hi = vifi >> 8;
} else {
- msg->im_vif = mrt->mroute_reg_vif_num;
- msg->im_vif_hi = mrt->mroute_reg_vif_num >> 8;
+ /* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
+ int vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
+
+ msg->im_vif = vif_num;
+ msg->im_vif_hi = vif_num >> 8;
}
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
@@ -1064,10 +1077,8 @@ static int ipmr_cache_report(struct mr_table *mrt,
skb->transport_header = skb->network_header;
}
- rcu_read_lock();
mroute_sk = rcu_dereference(mrt->mroute_sk);
if (!mroute_sk) {
- rcu_read_unlock();
kfree_skb(skb);
return -EINVAL;
}
@@ -1076,7 +1087,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
/* Deliver to mrouted */
ret = sock_queue_rcv_skb(mroute_sk, skb);
- rcu_read_unlock();
+
if (ret < 0) {
net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
kfree_skb(skb);
@@ -1086,6 +1097,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
}
/* Queue a packet for resolution. It gets locked cache entry! */
+/* Called under rcu_read_lock() */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
struct sk_buff *skb, struct net_device *dev)
{
@@ -1198,12 +1210,12 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
mfc->mfcc_mcastgrp.s_addr, parent);
rcu_read_unlock();
if (c) {
- write_lock_bh(&mrt_lock);
+ spin_lock(&mrt_lock);
c->_c.mfc_parent = mfc->mfcc_parent;
ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
if (!mrtsock)
c->_c.mfc_flags |= MFC_STATIC;
- write_unlock_bh(&mrt_lock);
+ spin_unlock(&mrt_lock);
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
@@ -1598,20 +1610,20 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
- read_lock(&mrt_lock);
+ rcu_read_lock();
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
- vr.icount = vif->pkt_in;
- vr.ocount = vif->pkt_out;
- vr.ibytes = vif->bytes_in;
- vr.obytes = vif->bytes_out;
- read_unlock(&mrt_lock);
+ vr.icount = READ_ONCE(vif->pkt_in);
+ vr.ocount = READ_ONCE(vif->pkt_out);
+ vr.ibytes = READ_ONCE(vif->bytes_in);
+ vr.obytes = READ_ONCE(vif->bytes_out);
+ rcu_read_unlock();
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EADDRNOTAVAIL;
case SIOCGETSGCNT:
if (copy_from_user(&sr, arg, sizeof(sr)))
@@ -1673,20 +1685,20 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
- read_lock(&mrt_lock);
+ rcu_read_lock();
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
- vr.icount = vif->pkt_in;
- vr.ocount = vif->pkt_out;
- vr.ibytes = vif->bytes_in;
- vr.obytes = vif->bytes_out;
- read_unlock(&mrt_lock);
+ vr.icount = READ_ONCE(vif->pkt_in);
+ vr.ocount = READ_ONCE(vif->pkt_out);
+ vr.ibytes = READ_ONCE(vif->bytes_in);
+ vr.obytes = READ_ONCE(vif->bytes_out);
+ rcu_read_unlock();
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EADDRNOTAVAIL;
case SIOCGETSGCNT:
if (copy_from_user(&sr, arg, sizeof(sr)))
@@ -1726,7 +1738,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
ipmr_for_each_table(mrt, net) {
v = &mrt->vif_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
- if (v->dev == dev)
+ if (rcu_access_pointer(v->dev) == dev)
vif_delete(mrt, ct, 1, NULL);
}
}
@@ -1804,26 +1816,28 @@ static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
}
#endif
-/* Processing handlers for ipmr_forward */
+/* Processing handlers for ipmr_forward, under rcu_read_lock() */
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
int in_vifi, struct sk_buff *skb, int vifi)
{
const struct iphdr *iph = ip_hdr(skb);
struct vif_device *vif = &mrt->vif_table[vifi];
+ struct net_device *vif_dev;
struct net_device *dev;
struct rtable *rt;
struct flowi4 fl4;
int encap = 0;
- if (!vif->dev)
+ vif_dev = vif_dev_read(vif);
+ if (!vif_dev)
goto out_free;
if (vif->flags & VIFF_REGISTER) {
- vif->pkt_out++;
- vif->bytes_out += skb->len;
- vif->dev->stats.tx_bytes += skb->len;
- vif->dev->stats.tx_packets++;
+ WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
+ WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
+ vif_dev->stats.tx_bytes += skb->len;
+ vif_dev->stats.tx_packets++;
ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
goto out_free;
}
@@ -1868,8 +1882,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
goto out_free;
}
- vif->pkt_out++;
- vif->bytes_out += skb->len;
+ WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
+ WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
@@ -1881,8 +1895,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
if (vif->flags & VIFF_TUNNEL) {
ip_encap(net, skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */
- vif->dev->stats.tx_packets++;
- vif->dev->stats.tx_bytes += skb->len;
+ vif_dev->stats.tx_packets++;
+ vif_dev->stats.tx_bytes += skb->len;
}
IPCB(skb)->flags |= IPSKB_FORWARDED;
@@ -1906,18 +1920,20 @@ out_free:
kfree_skb(skb);
}
-static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
+/* Called with mrt_lock or rcu_read_lock() */
+static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
{
int ct;
-
- for (ct = mrt->maxvif-1; ct >= 0; ct--) {
- if (mrt->vif_table[ct].dev == dev)
+ /* Pairs with WRITE_ONCE() in vif_delete()/vif_add() */
+ for (ct = READ_ONCE(mrt->maxvif) - 1; ct >= 0; ct--) {
+ if (rcu_access_pointer(mrt->vif_table[ct].dev) == dev)
break;
}
return ct;
}
/* "local" means that we should preserve one skb (for local delivery) */
+/* Called under rcu_read_lock() */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
struct net_device *dev, struct sk_buff *skb,
struct mfc_cache *c, int local)
@@ -1944,7 +1960,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
}
/* Wrong interface: drop packet and (maybe) send PIM assert. */
- if (mrt->vif_table[vif].dev != dev) {
+ if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
if (rt_is_output_route(skb_rtable(skb))) {
/* It is our own packet, looped back.
* Very complicated situation...
@@ -1983,8 +1999,10 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
}
forward:
- mrt->vif_table[vif].pkt_in++;
- mrt->vif_table[vif].bytes_in += skb->len;
+ WRITE_ONCE(mrt->vif_table[vif].pkt_in,
+ mrt->vif_table[vif].pkt_in + 1);
+ WRITE_ONCE(mrt->vif_table[vif].bytes_in,
+ mrt->vif_table[vif].bytes_in + skb->len);
/* Forward the frame */
if (c->mfc_origin == htonl(INADDR_ANY) &&
@@ -2140,22 +2158,14 @@ int ip_mr_input(struct sk_buff *skb)
skb = skb2;
}
- read_lock(&mrt_lock);
vif = ipmr_find_vif(mrt, dev);
- if (vif >= 0) {
- int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
- read_unlock(&mrt_lock);
-
- return err2;
- }
- read_unlock(&mrt_lock);
+ if (vif >= 0)
+ return ipmr_cache_unresolved(mrt, vif, skb, dev);
kfree_skb(skb);
return -ENODEV;
}
- read_lock(&mrt_lock);
ip_mr_forward(net, mrt, dev, skb, cache, local);
- read_unlock(&mrt_lock);
if (local)
return ip_local_deliver(skb);
@@ -2252,18 +2262,15 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
int vif = -1;
dev = skb->dev;
- read_lock(&mrt_lock);
if (dev)
vif = ipmr_find_vif(mrt, dev);
if (vif < 0) {
- read_unlock(&mrt_lock);
rcu_read_unlock();
return -ENODEV;
}
skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
if (!skb2) {
- read_unlock(&mrt_lock);
rcu_read_unlock();
return -ENOMEM;
}
@@ -2277,14 +2284,11 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
iph->daddr = daddr;
iph->version = 0;
err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
- read_unlock(&mrt_lock);
rcu_read_unlock();
return err;
}
- read_lock(&mrt_lock);
err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
- read_unlock(&mrt_lock);
rcu_read_unlock();
return err;
}
@@ -2404,7 +2408,7 @@ static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
return len;
}
-static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
+static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
{
struct net *net = read_pnet(&mrt->net);
struct nlmsghdr *nlh;
@@ -2744,18 +2748,21 @@ static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
+ struct net_device *vif_dev;
struct nlattr *vif_nest;
struct vif_device *vif;
+ vif = &mrt->vif_table[vifid];
+ vif_dev = rtnl_dereference(vif->dev);
/* if the VIF doesn't exist just continue */
- if (!VIF_EXISTS(mrt, vifid))
+ if (!vif_dev)
return true;
- vif = &mrt->vif_table[vifid];
vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
if (!vif_nest)
return false;
- if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
+
+ if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif_dev->ifindex) ||
nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
@@ -2887,7 +2894,7 @@ out:
*/
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(mrt_lock)
+ __acquires(RCU)
{
struct mr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
@@ -2899,14 +2906,14 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
iter->mrt = mrt;
- read_lock(&mrt_lock);
+ rcu_read_lock();
return mr_vif_seq_start(seq, pos);
}
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
- __releases(mrt_lock)
+ __releases(RCU)
{
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
@@ -2919,9 +2926,11 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
"Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
} else {
const struct vif_device *vif = v;
- const char *name = vif->dev ?
- vif->dev->name : "none";
+ const struct net_device *vif_dev;
+ const char *name;
+ vif_dev = vif_dev_read(vif);
+ name = vif_dev ? vif_dev->name : "none";
seq_printf(seq,
"%2td %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
vif - mrt->vif_table,
@@ -3017,7 +3026,7 @@ static int ipmr_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
- ipmr_mr_table_iter, &mrt_lock, extack);
+ ipmr_mr_table_iter, extack);
}
static const struct fib_notifier_ops ipmr_notifier_ops_template = {
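Taken together, the ipmr.c hunks above retire mrt_lock as a reader/writer lock on the hot paths: readers (forwarding, the procfs and ioctl dumpers, cache reports) now run under rcu_read_lock() and fetch vif->dev through vif_dev_read()/rcu_dereference(), writers (vif_add(), vif_delete(), MFC updates) serialize on a plain spinlock and publish pointers with rcu_assign_pointer()/RCU_INIT_POINTER(), and scalars shared with lockless readers (mroute_reg_vif_num, maxvif, the per-VIF byte and packet counters) are accessed through READ_ONCE()/WRITE_ONCE() pairs. A minimal sketch of the pattern, with illustrative demo_* names rather than the file's actual structures:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/netdevice.h>

    struct demo_vif {
    	struct net_device __rcu *dev;
    	unsigned long bytes_out;	/* written under demo_lock, read locklessly */
    };

    static DEFINE_SPINLOCK(demo_lock);	/* taken by writers only */

    /* Reader side: caller holds rcu_read_lock(); never blocks a writer. */
    static struct net_device *demo_vif_dev(const struct demo_vif *vif)
    {
    	return rcu_dereference(vif->dev);
    }

    static unsigned long demo_vif_bytes(const struct demo_vif *vif)
    {
    	return READ_ONCE(vif->bytes_out);	/* pairs with WRITE_ONCE() below */
    }

    /* Writer side: a short spinlock section publishes the pointer with
     * rcu_assign_pointer(); concurrent readers see either the old or the
     * new device, never a torn value.
     */
    static void demo_vif_attach(struct demo_vif *vif, struct net_device *dev)
    {
    	spin_lock(&demo_lock);
    	rcu_assign_pointer(vif->dev, dev);
    	WRITE_ONCE(vif->bytes_out, 0);
    	spin_unlock(&demo_lock);
    }

    static void demo_vif_detach(struct demo_vif *vif)
    {
    	spin_lock(&demo_lock);
    	RCU_INIT_POINTER(vif->dev, NULL);
    	spin_unlock(&demo_lock);
    	/* the old device must remain valid for one RCU grace period */
    }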
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index aa8738a91210..271dc03fc6db 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -13,7 +13,7 @@ void vif_device_init(struct vif_device *v,
unsigned short flags,
unsigned short get_iflink_mask)
{
- v->dev = NULL;
+ RCU_INIT_POINTER(v->dev, NULL);
v->bytes_in = 0;
v->bytes_out = 0;
v->pkt_in = 0;
@@ -208,6 +208,7 @@ EXPORT_SYMBOL(mr_mfc_seq_next);
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm)
{
+ struct net_device *vif_dev;
struct rta_mfc_stats mfcs;
struct nlattr *mp_attr;
struct rtnexthop *nhp;
@@ -220,10 +221,13 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
return -ENOENT;
}
- if (VIF_EXISTS(mrt, c->mfc_parent) &&
- nla_put_u32(skb, RTA_IIF,
- mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
+ rcu_read_lock();
+ vif_dev = rcu_dereference(mrt->vif_table[c->mfc_parent].dev);
+ if (vif_dev && nla_put_u32(skb, RTA_IIF, vif_dev->ifindex) < 0) {
+ rcu_read_unlock();
return -EMSGSIZE;
+ }
+ rcu_read_unlock();
if (c->mfc_flags & MFC_OFFLOAD)
rtm->rtm_flags |= RTNH_F_OFFLOAD;
@@ -232,23 +236,27 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
if (!mp_attr)
return -EMSGSIZE;
+ rcu_read_lock();
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
- if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
- struct vif_device *vif;
+ struct vif_device *vif = &mrt->vif_table[ct];
+
+ vif_dev = rcu_dereference(vif->dev);
+ if (vif_dev && c->mfc_un.res.ttls[ct] < 255) {
nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
if (!nhp) {
+ rcu_read_unlock();
nla_nest_cancel(skb, mp_attr);
return -EMSGSIZE;
}
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
- vif = &mrt->vif_table[ct];
- nhp->rtnh_ifindex = vif->dev->ifindex;
+ nhp->rtnh_ifindex = vif_dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
+ rcu_read_unlock();
nla_nest_end(skb, mp_attr);
@@ -275,13 +283,14 @@ static bool mr_mfc_uses_dev(const struct mr_table *mrt,
int ct;
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
- if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
- const struct vif_device *vif;
-
- vif = &mrt->vif_table[ct];
- if (vif->dev == dev)
- return true;
- }
+ const struct net_device *vif_dev;
+ const struct vif_device *vif;
+
+ vif = &mrt->vif_table[ct];
+ vif_dev = rcu_access_pointer(vif->dev);
+ if (vif_dev && c->mfc_un.res.ttls[ct] < 255 &&
+ vif_dev == dev)
+ return true;
}
return false;
}
@@ -390,7 +399,6 @@ int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock,
struct netlink_ext_ack *extack)
{
struct mr_table *mrt;
@@ -402,22 +410,25 @@ int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
struct vif_device *v = &mrt->vif_table[0];
+ struct net_device *vif_dev;
struct mr_mfc *mfc;
int vifi;
/* Notify on table VIF entries */
- read_lock(mrt_lock);
+ rcu_read_lock();
for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
- if (!v->dev)
+ vif_dev = rcu_dereference(v->dev);
+ if (!vif_dev)
continue;
err = mr_call_vif_notifier(nb, family,
- FIB_EVENT_VIF_ADD,
- v, vifi, mrt->id, extack);
+ FIB_EVENT_VIF_ADD, v,
+ vif_dev, vifi,
+ mrt->id, extack);
if (err)
break;
}
- read_unlock(mrt_lock);
+ rcu_read_unlock();
if (err)
return err;
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 76a411ae9fe6..a334f0dcc2d0 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -579,28 +579,22 @@ static struct nf_ct_helper_expectfn callforwarding_nat = {
.expectfn = ip_nat_callforwarding_expect,
};
+static const struct nfct_h323_nat_hooks nathooks = {
+ .set_h245_addr = set_h245_addr,
+ .set_h225_addr = set_h225_addr,
+ .set_sig_addr = set_sig_addr,
+ .set_ras_addr = set_ras_addr,
+ .nat_rtp_rtcp = nat_rtp_rtcp,
+ .nat_t120 = nat_t120,
+ .nat_h245 = nat_h245,
+ .nat_callforwarding = nat_callforwarding,
+ .nat_q931 = nat_q931,
+};
+
/****************************************************************************/
static int __init nf_nat_h323_init(void)
{
- BUG_ON(set_h245_addr_hook != NULL);
- BUG_ON(set_h225_addr_hook != NULL);
- BUG_ON(set_sig_addr_hook != NULL);
- BUG_ON(set_ras_addr_hook != NULL);
- BUG_ON(nat_rtp_rtcp_hook != NULL);
- BUG_ON(nat_t120_hook != NULL);
- BUG_ON(nat_h245_hook != NULL);
- BUG_ON(nat_callforwarding_hook != NULL);
- BUG_ON(nat_q931_hook != NULL);
-
- RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr);
- RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr);
- RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr);
- RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr);
- RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp);
- RCU_INIT_POINTER(nat_t120_hook, nat_t120);
- RCU_INIT_POINTER(nat_h245_hook, nat_h245);
- RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
- RCU_INIT_POINTER(nat_q931_hook, nat_q931);
+ RCU_INIT_POINTER(nfct_h323_nat_hook, &nathooks);
nf_ct_helper_expectfn_register(&q931_nat);
nf_ct_helper_expectfn_register(&callforwarding_nat);
return 0;
@@ -609,15 +603,7 @@ static int __init nf_nat_h323_init(void)
/****************************************************************************/
static void __exit nf_nat_h323_fini(void)
{
- RCU_INIT_POINTER(set_h245_addr_hook, NULL);
- RCU_INIT_POINTER(set_h225_addr_hook, NULL);
- RCU_INIT_POINTER(set_sig_addr_hook, NULL);
- RCU_INIT_POINTER(set_ras_addr_hook, NULL);
- RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL);
- RCU_INIT_POINTER(nat_t120_hook, NULL);
- RCU_INIT_POINTER(nat_h245_hook, NULL);
- RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
- RCU_INIT_POINTER(nat_q931_hook, NULL);
+ RCU_INIT_POINTER(nfct_h323_nat_hook, NULL);
nf_ct_helper_expectfn_unregister(&q931_nat);
nf_ct_helper_expectfn_unregister(&callforwarding_nat);
synchronize_rcu();
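The nf_nat_h323 change above folds nine individually RCU-managed hook pointers into a single pointer to a const ops structure, so module init and exit each publish or retract one pointer and callers do one dereference plus one NULL check instead of nine. A sketch of the same shape with invented demo_* names (two callbacks instead of nine; this is not the conntrack API itself):

    #include <linux/rcupdate.h>
    #include <linux/skbuff.h>

    struct demo_nat_hooks {
    	int (*nat_addr)(struct sk_buff *skb);
    	int (*nat_port)(struct sk_buff *skb);
    };

    /* One RCU-managed pointer instead of one per callback. */
    static const struct demo_nat_hooks __rcu *demo_nat_hook;

    static int demo_nat_addr(struct sk_buff *skb) { return 0; }
    static int demo_nat_port(struct sk_buff *skb) { return 0; }

    static const struct demo_nat_hooks demo_hooks = {
    	.nat_addr = demo_nat_addr,
    	.nat_port = demo_nat_port,
    };

    static void demo_hooks_register(void)
    {
    	RCU_INIT_POINTER(demo_nat_hook, &demo_hooks);
    }

    static void demo_hooks_unregister(void)
    {
    	RCU_INIT_POINTER(demo_nat_hook, NULL);
    	synchronize_rcu();	/* wait out in-flight callers */
    }

    /* Caller side: dereference once, check once, then use any member. */
    static int demo_call_nat_addr(struct sk_buff *skb)
    {
    	const struct demo_nat_hooks *hooks;
    	int ret = 0;

    	rcu_read_lock();
    	hooks = rcu_dereference(demo_nat_hook);
    	if (hooks)
    		ret = hooks->nat_addr(skb);
    	rcu_read_unlock();
    	return ret;
    }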
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 3c6101def7d6..b83c2bd9d722 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -50,7 +50,7 @@
struct ping_table {
struct hlist_nulls_head hash[PING_HTABLE_SIZE];
- rwlock_t lock;
+ spinlock_t lock;
};
static struct ping_table ping_table;
@@ -82,7 +82,7 @@ int ping_get_port(struct sock *sk, unsigned short ident)
struct sock *sk2 = NULL;
isk = inet_sk(sk);
- write_lock_bh(&ping_table.lock);
+ spin_lock(&ping_table.lock);
if (ident == 0) {
u32 i;
u16 result = ping_port_rover + 1;
@@ -128,14 +128,15 @@ next_port:
if (sk_unhashed(sk)) {
pr_debug("was not hashed\n");
sock_hold(sk);
- hlist_nulls_add_head(&sk->sk_nulls_node, hlist);
+ sock_set_flag(sk, SOCK_RCU_FREE);
+ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, hlist);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}
- write_unlock_bh(&ping_table.lock);
+ spin_unlock(&ping_table.lock);
return 0;
fail:
- write_unlock_bh(&ping_table.lock);
+ spin_unlock(&ping_table.lock);
return 1;
}
EXPORT_SYMBOL_GPL(ping_get_port);
@@ -153,19 +154,19 @@ void ping_unhash(struct sock *sk)
struct inet_sock *isk = inet_sk(sk);
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
- write_lock_bh(&ping_table.lock);
+ spin_lock(&ping_table.lock);
if (sk_hashed(sk)) {
- hlist_nulls_del(&sk->sk_nulls_node);
- sk_nulls_node_init(&sk->sk_nulls_node);
+ hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
- write_unlock_bh(&ping_table.lock);
+ spin_unlock(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);
+/* Called under rcu_read_lock() */
static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
{
struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
@@ -190,8 +191,6 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
return NULL;
}
- read_lock_bh(&ping_table.lock);
-
ping_portaddr_for_each_entry(sk, hnode, hslot) {
isk = inet_sk(sk);
@@ -230,13 +229,11 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
sk->sk_bound_dev_if != sdif)
continue;
- sock_hold(sk);
goto exit;
}
sk = NULL;
exit:
- read_unlock_bh(&ping_table.lock);
return sk;
}
@@ -592,7 +589,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
sk->sk_err = err;
sk_error_report(sk);
out:
- sock_put(sk);
+ return;
}
EXPORT_SYMBOL_GPL(ping_err);
@@ -998,7 +995,6 @@ enum skb_drop_reason ping_rcv(struct sk_buff *skb)
reason = __ping_queue_rcv_skb(sk, skb2);
else
reason = SKB_DROP_REASON_NOMEM;
- sock_put(sk);
}
if (reason)
@@ -1084,13 +1080,13 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
}
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
- __acquires(ping_table.lock)
+ __acquires(RCU)
{
struct ping_iter_state *state = seq->private;
state->bucket = 0;
state->family = family;
- read_lock_bh(&ping_table.lock);
+ rcu_read_lock();
return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
@@ -1116,9 +1112,9 @@ void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
EXPORT_SYMBOL_GPL(ping_seq_next);
void ping_seq_stop(struct seq_file *seq, void *v)
- __releases(ping_table.lock)
+ __releases(RCU)
{
- read_unlock_bh(&ping_table.lock);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ping_seq_stop);
@@ -1202,5 +1198,5 @@ void __init ping_init(void)
for (i = 0; i < PING_HTABLE_SIZE; i++)
INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i);
- rwlock_init(&ping_table.lock);
+ spin_lock_init(&ping_table.lock);
}
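With the ping hash converted above, ping_table.lock becomes a writer-only spinlock: ping_get_port() and ping_unhash() still serialize on it, but ping_lookup() walks the nulls chain under the caller's rcu_read_lock() and no longer takes a reference, which is why the sock_put() calls disappear from ping_err() and ping_rcv(). SOCK_RCU_FREE is what makes that safe: the socket is only freed after an RCU grace period, so it cannot vanish while the read-side critical section that found it is still running. A minimal sketch of the read side under those assumptions (demo_* names, not the file's code):

    #include <net/sock.h>
    #include <net/inet_sock.h>

    /* Caller holds rcu_read_lock() across lookup *and* use of the result. */
    static struct sock *demo_ping_lookup(struct hlist_nulls_head *hslot, u16 ident)
    {
    	struct hlist_nulls_node *hnode;
    	struct sock *sk;

    	sk_nulls_for_each_rcu(sk, hnode, hslot) {
    		if (inet_sk(sk)->inet_num == ident)
    			return sk;	/* valid until rcu_read_unlock() */
    	}
    	return NULL;
    }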
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bbd717805b10..006c1f0ed8b4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -85,21 +85,20 @@ struct raw_frag_vec {
int hlen;
};
-struct raw_hashinfo raw_v4_hashinfo = {
- .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
-};
+struct raw_hashinfo raw_v4_hashinfo;
EXPORT_SYMBOL_GPL(raw_v4_hashinfo);
int raw_hash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
- struct hlist_head *head;
+ struct hlist_nulls_head *hlist;
- head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
+ hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
- write_lock_bh(&h->lock);
- sk_add_node(sk, head);
- write_unlock_bh(&h->lock);
+ spin_lock(&h->lock);
+ __sk_nulls_add_node_rcu(sk, hlist);
+ sock_set_flag(sk, SOCK_RCU_FREE);
+ spin_unlock(&h->lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
@@ -110,31 +109,26 @@ void raw_unhash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
- write_lock_bh(&h->lock);
- if (sk_del_node_init(sk))
+ spin_lock(&h->lock);
+ if (__sk_nulls_del_node_init_rcu(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&h->lock);
+ spin_unlock(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_unhash_sk);
-struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
- unsigned short num, __be32 raddr, __be32 laddr,
- int dif, int sdif)
+bool raw_v4_match(struct net *net, struct sock *sk, unsigned short num,
+ __be32 raddr, __be32 laddr, int dif, int sdif)
{
- sk_for_each_from(sk) {
- struct inet_sock *inet = inet_sk(sk);
-
- if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
- !(inet->inet_daddr && inet->inet_daddr != raddr) &&
- !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
- raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
- goto found; /* gotcha */
- }
- sk = NULL;
-found:
- return sk;
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
+ !(inet->inet_daddr && inet->inet_daddr != raddr) &&
+ !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+ raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
+ return true;
+ return false;
}
-EXPORT_SYMBOL_GPL(__raw_v4_lookup);
+EXPORT_SYMBOL_GPL(raw_v4_match);
/*
* 0 - deliver
@@ -168,23 +162,20 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
*/
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
+ struct net *net = dev_net(skb->dev);
+ struct hlist_nulls_head *hlist;
+ struct hlist_nulls_node *hnode;
int sdif = inet_sdif(skb);
int dif = inet_iif(skb);
- struct sock *sk;
- struct hlist_head *head;
int delivered = 0;
- struct net *net;
-
- read_lock(&raw_v4_hashinfo.lock);
- head = &raw_v4_hashinfo.ht[hash];
- if (hlist_empty(head))
- goto out;
-
- net = dev_net(skb->dev);
- sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
- iph->saddr, iph->daddr, dif, sdif);
+ struct sock *sk;
- while (sk) {
+ hlist = &raw_v4_hashinfo.ht[hash];
+ rcu_read_lock();
+ sk_nulls_for_each(sk, hnode, hlist) {
+ if (!raw_v4_match(net, sk, iph->protocol,
+ iph->saddr, iph->daddr, dif, sdif))
+ continue;
delivered = 1;
if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
@@ -195,31 +186,16 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
if (clone)
raw_rcv(sk, clone);
}
- sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
- iph->saddr, iph->daddr,
- dif, sdif);
}
-out:
- read_unlock(&raw_v4_hashinfo.lock);
+ rcu_read_unlock();
return delivered;
}
int raw_local_deliver(struct sk_buff *skb, int protocol)
{
- int hash;
- struct sock *raw_sk;
-
- hash = protocol & (RAW_HTABLE_SIZE - 1);
- raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
-
- /* If there maybe a raw socket we must check - if not we
- * don't care less
- */
- if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
- raw_sk = NULL;
-
- return raw_sk != NULL;
+ int hash = protocol & (RAW_HTABLE_SIZE - 1);
+ return raw_v4_input(skb, ip_hdr(skb), hash);
}
static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
@@ -286,31 +262,27 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
- int hash;
- struct sock *raw_sk;
+ struct net *net = dev_net(skb->dev);
+ struct hlist_nulls_head *hlist;
+ struct hlist_nulls_node *hnode;
+ int dif = skb->dev->ifindex;
+ int sdif = inet_sdif(skb);
const struct iphdr *iph;
- struct net *net;
+ struct sock *sk;
+ int hash;
hash = protocol & (RAW_HTABLE_SIZE - 1);
+ hlist = &raw_v4_hashinfo.ht[hash];
- read_lock(&raw_v4_hashinfo.lock);
- raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
- if (raw_sk) {
- int dif = skb->dev->ifindex;
- int sdif = inet_sdif(skb);
-
+ rcu_read_lock();
+ sk_nulls_for_each(sk, hnode, hlist) {
iph = (const struct iphdr *)skb->data;
- net = dev_net(skb->dev);
-
- while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
- iph->daddr, iph->saddr,
- dif, sdif)) != NULL) {
- raw_err(raw_sk, skb, info);
- raw_sk = sk_next(raw_sk);
- iph = (const struct iphdr *)skb->data;
- }
+ if (!raw_v4_match(net, sk, iph->protocol,
+ iph->daddr, iph->saddr, dif, sdif))
+ continue;
+ raw_err(sk, skb, info);
}
- read_unlock(&raw_v4_hashinfo.lock);
+ rcu_read_unlock();
}
static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -971,44 +943,41 @@ struct proto raw_prot = {
};
#ifdef CONFIG_PROC_FS
-static struct sock *raw_get_first(struct seq_file *seq)
+static struct sock *raw_get_first(struct seq_file *seq, int bucket)
{
- struct sock *sk;
struct raw_hashinfo *h = pde_data(file_inode(seq->file));
struct raw_iter_state *state = raw_seq_private(seq);
+ struct hlist_nulls_head *hlist;
+ struct hlist_nulls_node *hnode;
+ struct sock *sk;
- for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
+ for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
- sk_for_each(sk, &h->ht[state->bucket])
+ hlist = &h->ht[state->bucket];
+ sk_nulls_for_each(sk, hnode, hlist) {
if (sock_net(sk) == seq_file_net(seq))
- goto found;
+ return sk;
+ }
}
- sk = NULL;
-found:
- return sk;
+ return NULL;
}
static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
{
- struct raw_hashinfo *h = pde_data(file_inode(seq->file));
struct raw_iter_state *state = raw_seq_private(seq);
do {
- sk = sk_next(sk);
-try_again:
- ;
+ sk = sk_nulls_next(sk);
} while (sk && sock_net(sk) != seq_file_net(seq));
- if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
- sk = sk_head(&h->ht[state->bucket]);
- goto try_again;
- }
+ if (!sk)
+ return raw_get_first(seq, state->bucket + 1);
return sk;
}
static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
{
- struct sock *sk = raw_get_first(seq);
+ struct sock *sk = raw_get_first(seq, 0);
if (sk)
while (pos && (sk = raw_get_next(seq, sk)) != NULL)
@@ -1017,11 +986,9 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
}
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(&h->lock)
+ __acquires(RCU)
{
- struct raw_hashinfo *h = pde_data(file_inode(seq->file));
-
- read_lock(&h->lock);
+ rcu_read_lock();
return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL(raw_seq_start);
@@ -1031,7 +998,7 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct sock *sk;
if (v == SEQ_START_TOKEN)
- sk = raw_get_first(seq);
+ sk = raw_get_first(seq, 0);
else
sk = raw_get_next(seq, v);
++*pos;
@@ -1040,11 +1007,9 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
EXPORT_SYMBOL_GPL(raw_seq_next);
void raw_seq_stop(struct seq_file *seq, void *v)
- __releases(&h->lock)
+ __releases(RCU)
{
- struct raw_hashinfo *h = pde_data(file_inode(seq->file));
-
- read_unlock(&h->lock);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(raw_seq_stop);
@@ -1106,6 +1071,7 @@ static __net_initdata struct pernet_operations raw_net_ops = {
int __init raw_proc_init(void)
{
+
return register_pernet_subsys(&raw_net_ops);
}
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index ccacbde30a2c..999321834b94 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -34,57 +34,57 @@ raw_get_hashinfo(const struct inet_diag_req_v2 *r)
* use helper to figure it out.
*/
-static struct sock *raw_lookup(struct net *net, struct sock *from,
- const struct inet_diag_req_v2 *req)
+static bool raw_lookup(struct net *net, struct sock *sk,
+ const struct inet_diag_req_v2 *req)
{
struct inet_diag_req_raw *r = (void *)req;
- struct sock *sk = NULL;
if (r->sdiag_family == AF_INET)
- sk = __raw_v4_lookup(net, from, r->sdiag_raw_protocol,
- r->id.idiag_dst[0],
- r->id.idiag_src[0],
- r->id.idiag_if, 0);
+ return raw_v4_match(net, sk, r->sdiag_raw_protocol,
+ r->id.idiag_dst[0],
+ r->id.idiag_src[0],
+ r->id.idiag_if, 0);
#if IS_ENABLED(CONFIG_IPV6)
else
- sk = __raw_v6_lookup(net, from, r->sdiag_raw_protocol,
- (const struct in6_addr *)r->id.idiag_src,
- (const struct in6_addr *)r->id.idiag_dst,
- r->id.idiag_if, 0);
+ return raw_v6_match(net, sk, r->sdiag_raw_protocol,
+ (const struct in6_addr *)r->id.idiag_src,
+ (const struct in6_addr *)r->id.idiag_dst,
+ r->id.idiag_if, 0);
#endif
- return sk;
+ return false;
}
static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
{
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
- struct sock *sk = NULL, *s;
+ struct hlist_nulls_head *hlist;
+ struct hlist_nulls_node *hnode;
+ struct sock *sk;
int slot;
if (IS_ERR(hashinfo))
return ERR_CAST(hashinfo);
- read_lock(&hashinfo->lock);
+ rcu_read_lock();
for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
- sk_for_each(s, &hashinfo->ht[slot]) {
- sk = raw_lookup(net, s, r);
- if (sk) {
+ hlist = &hashinfo->ht[slot];
+ sk_nulls_for_each(sk, hnode, hlist) {
+ if (raw_lookup(net, sk, r)) {
/*
* Grab it and keep until we fill
- * diag meaage to be reported, so
+ * diag message to be reported, so
* caller should call sock_put then.
- * We can do that because we're keeping
- * hashinfo->lock here.
*/
- sock_hold(sk);
- goto out_unlock;
+ if (refcount_inc_not_zero(&sk->sk_refcnt))
+ goto out_unlock;
}
}
}
+ sk = ERR_PTR(-ENOENT);
out_unlock:
- read_unlock(&hashinfo->lock);
+ rcu_read_unlock();
- return sk ? sk : ERR_PTR(-ENOENT);
+ return sk;
}
static int raw_diag_dump_one(struct netlink_callback *cb,
@@ -142,6 +142,8 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
struct net *net = sock_net(skb->sk);
struct inet_diag_dump_data *cb_data;
+ struct hlist_nulls_head *hlist;
+ struct hlist_nulls_node *hnode;
int num, s_num, slot, s_slot;
struct sock *sk = NULL;
struct nlattr *bc;
@@ -154,11 +156,12 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
s_slot = cb->args[0];
num = s_num = cb->args[1];
- read_lock(&hashinfo->lock);
+ rcu_read_lock();
for (slot = s_slot; slot < RAW_HTABLE_SIZE; s_num = 0, slot++) {
num = 0;
- sk_for_each(sk, &hashinfo->ht[slot]) {
+ hlist = &hashinfo->ht[slot];
+ sk_nulls_for_each(sk, hnode, hlist) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
@@ -181,7 +184,7 @@ next:
}
out_unlock:
- read_unlock(&hashinfo->lock);
+ rcu_read_unlock();
cb->args[0] = slot;
cb->args[1] = num;
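raw_sock_get() above illustrates the other half of the problem: the caller needs the socket to outlive the RCU section (the diag message is filled in afterwards), so it must take a real reference, and under RCU an unconditional sock_hold() is no longer safe because the socket it just found may already have dropped its last reference and be waiting out its grace period. refcount_inc_not_zero() takes the reference only if the object is still live. A sketch of the pattern (hypothetical helper name, not this file's code):

    #include <net/sock.h>

    /* Called under rcu_read_lock(); on success the caller owns a reference
     * and must sock_put() it when done.
     */
    static struct sock *demo_grab_sock(struct sock *sk)
    {
    	if (sk && refcount_inc_not_zero(&sk->sk_refcnt))
    		return sk;
    	return NULL;	/* already being destroyed; treat as not found */
    }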
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4702c61207a8..bce603f399bd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1550,9 +1550,8 @@ void rt_flush_dev(struct net_device *dev)
if (rt->dst.dev != dev)
continue;
rt->dst.dev = blackhole_netdev;
- dev_replace_track(dev, blackhole_netdev,
- &rt->dst.dev_tracker,
- GFP_ATOMIC);
+ netdev_ref_replace(dev, blackhole_netdev,
+ &rt->dst.dev_tracker, GFP_ATOMIC);
list_move(&rt->rt_uncached, &ul->quarantine);
}
spin_unlock_bh(&ul->lock);
@@ -2440,24 +2439,9 @@ martian_source:
goto out;
}
-int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
-{
- struct fib_result res;
- int err;
-
- tos &= IPTOS_RT_MASK;
- rcu_read_lock();
- err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
- rcu_read_unlock();
-
- return err;
-}
-EXPORT_SYMBOL(ip_route_input_noref);
-
/* called with rcu_read_lock held */
-int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev, struct fib_result *res)
+static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev, struct fib_result *res)
{
/* Multicast recognition logic is moved from route cache to here.
* The problem was that too many Ethernet cards have broken/missing
@@ -2506,6 +2490,21 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
+int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev)
+{
+ struct fib_result res;
+ int err;
+
+ tos &= IPTOS_RT_MASK;
+ rcu_read_lock();
+ err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
+ rcu_read_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(ip_route_input_noref);
+
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
const struct flowi4 *fl4, int orig_oif,
@@ -2851,7 +2850,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
new->output = dst_discard_out;
new->dev = net->loopback_dev;
- dev_hold_track(new->dev, &new->dev_tracker, GFP_ATOMIC);
+ netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2faaaaf540ac..ba2bdc811374 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -294,6 +294,8 @@ EXPORT_SYMBOL(sysctl_tcp_mem);
atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
+DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
+EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);
#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
@@ -856,9 +858,6 @@ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
{
struct sk_buff *skb;
- if (unlikely(tcp_under_memory_pressure(sk)))
- sk_mem_reclaim_partial(sk);
-
skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
if (likely(skb)) {
bool mem_scheduled;
@@ -952,6 +951,24 @@ static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
return 0;
}
+
+static int tcp_wmem_schedule(struct sock *sk, int copy)
+{
+ int left;
+
+ if (likely(sk_wmem_schedule(sk, copy)))
+ return copy;
+
+ /* We could be in trouble if we have nothing queued.
+ * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
+ * to guarantee some progress.
+ */
+ left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+ if (left > 0)
+ sk_forced_mem_schedule(sk, min(left, copy));
+ return min(copy, sk->sk_forward_alloc);
+}
+
static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
struct page *page, int offset, size_t *size)
{
@@ -987,7 +1004,11 @@ new_segment:
tcp_mark_push(tp, skb);
goto new_segment;
}
- if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
+ if (tcp_downgrade_zcopy_pure(sk, skb))
+ return NULL;
+
+ copy = tcp_wmem_schedule(sk, copy);
+ if (!copy)
return NULL;
if (can_coalesce) {
@@ -1203,17 +1224,23 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
flags = msg->msg_flags;
- if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
+ if ((flags & MSG_ZEROCOPY) && size) {
skb = tcp_write_queue_tail(sk);
- uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
- if (!uarg) {
- err = -ENOBUFS;
- goto out_err;
- }
- zc = sk->sk_route_caps & NETIF_F_SG;
- if (!zc)
- uarg->zerocopy = 0;
+ if (msg->msg_ubuf) {
+ uarg = msg->msg_ubuf;
+ net_zcopy_get(uarg);
+ zc = sk->sk_route_caps & NETIF_F_SG;
+ } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
+ uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
+ if (!uarg) {
+ err = -ENOBUFS;
+ goto out_err;
+ }
+ zc = sk->sk_route_caps & NETIF_F_SG;
+ if (!zc)
+ uarg->zerocopy = 0;
+ }
}
if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
@@ -1336,8 +1363,14 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);
- if (tcp_downgrade_zcopy_pure(sk, skb) ||
- !sk_wmem_schedule(sk, copy))
+ if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) {
+ if (tcp_downgrade_zcopy_pure(sk, skb))
+ goto wait_for_space;
+ skb_zcopy_downgrade_managed(skb);
+ }
+
+ copy = tcp_wmem_schedule(sk, copy);
+ if (!copy)
goto wait_for_space;
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
@@ -1364,7 +1397,8 @@ new_segment:
skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
if (!skb_zcopy_pure(skb)) {
- if (!sk_wmem_schedule(sk, copy))
+ copy = tcp_wmem_schedule(sk, copy);
+ if (!copy)
goto wait_for_space;
}
@@ -1710,6 +1744,50 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
}
EXPORT_SYMBOL(tcp_read_sock);
+int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 seq = tp->copied_seq;
+ struct sk_buff *skb;
+ int copied = 0;
+ u32 offset;
+
+ if (sk->sk_state == TCP_LISTEN)
+ return -ENOTCONN;
+
+ while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
+ int used;
+
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ used = recv_actor(sk, skb);
+ if (used <= 0) {
+ if (!copied)
+ copied = used;
+ break;
+ }
+ seq += used;
+ copied += used;
+
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
+ consume_skb(skb);
+ ++seq;
+ break;
+ }
+ consume_skb(skb);
+ break;
+ }
+ WRITE_ONCE(tp->copied_seq, seq);
+
+ tcp_rcv_space_adjust(sk);
+
+ /* Clean up data we have read: This will do ACK frames. */
+ if (copied > 0)
+ tcp_cleanup_rbuf(sk, copied);
+
+ return copied;
+}
+EXPORT_SYMBOL(tcp_read_skb);
+
int tcp_peek_len(struct socket *sock)
{
return tcp_inq(sock->sk);
@@ -2764,8 +2842,6 @@ void __tcp_close(struct sock *sk, long timeout)
__kfree_skb(skb);
}
- sk_mem_reclaim(sk);
-
/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
if (sk->sk_state == TCP_CLOSE)
goto adjudge_to_death;
@@ -2873,7 +2949,6 @@ adjudge_to_death:
}
}
if (sk->sk_state != TCP_CLOSE) {
- sk_mem_reclaim(sk);
if (tcp_check_oom(sk, 0)) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
@@ -2951,7 +3026,6 @@ void tcp_write_queue_purge(struct sock *sk)
}
tcp_rtx_queue_purge(sk);
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
- sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
tcp_sk(sk)->packets_out = 0;
inet_csk(sk)->icsk_backoff = 0;
@@ -4514,16 +4588,24 @@ EXPORT_SYMBOL_GPL(tcp_done);
int tcp_abort(struct sock *sk, int err)
{
- if (!sk_fullsock(sk)) {
- if (sk->sk_state == TCP_NEW_SYN_RECV) {
- struct request_sock *req = inet_reqsk(sk);
+ int state = inet_sk_state_load(sk);
- local_bh_disable();
- inet_csk_reqsk_queue_drop(req->rsk_listener, req);
- local_bh_enable();
- return 0;
- }
- return -EOPNOTSUPP;
+ if (state == TCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+
+ local_bh_disable();
+ inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+ local_bh_enable();
+ return 0;
+ }
+ if (state == TCP_TIME_WAIT) {
+ struct inet_timewait_sock *tw = inet_twsk(sk);
+
+ refcount_inc(&tw->tw_refcnt);
+ local_bh_disable();
+ inet_twsk_deschedule_put(tw);
+ local_bh_enable();
+ return 0;
}
/* Don't race with userspace socket closes such as tcp_close. */
@@ -4655,11 +4737,11 @@ void __init tcp_init(void)
max_wshare = min(4UL*1024*1024, limit);
max_rshare = min(6UL*1024*1024, limit);
- init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
+ init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
- init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
+ init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
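The new tcp_wmem_schedule() above trades a hard failure for partial progress: when sk_wmem_schedule() fails under memory pressure, it force-charges whatever headroom is still left below tcp_wmem[0] and lets the caller copy a smaller amount rather than stalling with nothing queued. A worked example with invented numbers: a sender asks for copy = 8192 bytes and sk_wmem_schedule() fails; with tcp_wmem[0] = 4096 and sk_wmem_queued = 1024, left = 4096 - 1024 = 3072, so sk_forced_mem_schedule(sk, min(3072, 8192)) charges 3072 bytes and the function returns min(8192, sk->sk_forward_alloc), letting the caller make progress with a short copy instead of waiting for space that may never come.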
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 0d3f68bb51c0..a1626afe87a1 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -540,6 +540,7 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
struct proto *base)
{
prot[TCP_BPF_BASE] = *base;
+ prot[TCP_BPF_BASE].destroy = sock_map_destroy;
prot[TCP_BPF_BASE].close = sock_map_close;
prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 07dbcbae7782..ae73b34d32e9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -805,7 +805,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
* restart window, so that we send ACKs quickly.
*/
tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
- sk_mem_reclaim(sk);
}
}
icsk->icsk_ack.lrcvtime = now;
@@ -3972,7 +3971,7 @@ static bool smc_parse_options(const struct tcphdr *th,
/* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
* value on success.
*/
-static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
+u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
{
const unsigned char *ptr = (const unsigned char *)(th + 1);
int length = (th->doff * 4) - sizeof(struct tcphdr);
@@ -4011,6 +4010,7 @@ static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
}
return mss;
}
+EXPORT_SYMBOL_GPL(tcp_parse_mss_option);
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
* But, this can also be called on packets in the established flow when
@@ -4395,7 +4395,6 @@ void tcp_fin(struct sock *sk)
skb_rbtree_purge(&tp->out_of_order_queue);
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
- sk_mem_reclaim(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
@@ -5292,7 +5291,7 @@ new_range:
before(TCP_SKB_CB(skb)->end_seq, start)) {
/* Do not attempt collapsing tiny skbs */
if (range_truesize != head->truesize ||
- end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+ end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) {
tcp_collapse(sk, NULL, &tp->out_of_order_queue,
head, skb, start, end);
} else {
@@ -5341,7 +5340,6 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
tcp_drop_reason(sk, rb_to_skb(node),
SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
if (!prev || goal <= 0) {
- sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!tcp_under_memory_pressure(sk))
break;
@@ -5388,7 +5386,6 @@ static int tcp_prune_queue(struct sock *sk)
skb_peek(&sk->sk_receive_queue),
NULL,
tp->copied_seq, tp->rcv_nxt);
- sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d16e6e40f47b..c7e7101647dc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -819,6 +819,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_priority : sk->sk_priority;
transmit_time = tcp_transmit_time(sk);
+ xfrm_sk_clone_policy(ctl_sk, sk);
}
ip_send_unicast_reply(ctl_sk,
skb, &TCP_SKB_CB(skb)->header.h4.opt,
@@ -827,6 +828,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
transmit_time);
ctl_sk->sk_mark = 0;
+ xfrm_sk_free_policy(ctl_sk);
sock_net_set(ctl_sk, &init_net);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -3049,7 +3051,10 @@ struct proto tcp_prot = {
.stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.orphan_count = &tcp_orphan_count,
+
.memory_allocated = &tcp_memory_allocated,
+ .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
+
.memory_pressure = &tcp_memory_pressure,
.sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c38e07b50639..2b72ccd2e651 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3145,7 +3145,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
struct tcp_sock *tp = tcp_sk(sk);
unsigned int cur_mss;
int diff, len, err;
-
+ int avail_wnd;
/* Inconclusive MTU probe */
if (icsk->icsk_mtup.probe_size)
@@ -3167,17 +3167,25 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
return -EHOSTUNREACH; /* Routing failure or similar. */
cur_mss = tcp_current_mss(sk);
+ avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
/* If receiver has shrunk his window, and skb is out of
* new window, do not retransmit it. The exception is the
* case, when window is shrunk to zero. In this case
- * our retransmit serves as a zero window probe.
+ * our retransmit of one segment serves as a zero window probe.
*/
- if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
- TCP_SKB_CB(skb)->seq != tp->snd_una)
- return -EAGAIN;
+ if (avail_wnd <= 0) {
+ if (TCP_SKB_CB(skb)->seq != tp->snd_una)
+ return -EAGAIN;
+ avail_wnd = cur_mss;
+ }
len = cur_mss * segs;
+ if (len > avail_wnd) {
+ len = rounddown(avail_wnd, cur_mss);
+ if (!len)
+ len = avail_wnd;
+ }
if (skb->len > len) {
if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
cur_mss, GFP_ATOMIC))
@@ -3191,8 +3199,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
diff -= tcp_skb_pcount(skb);
if (diff)
tcp_adjust_pcount(sk, skb, diff);
- if (skb->len < cur_mss)
- tcp_retrans_try_collapse(sk, skb, cur_mss);
+ avail_wnd = min_t(int, avail_wnd, cur_mss);
+ if (skb->len < avail_wnd)
+ tcp_retrans_try_collapse(sk, skb, avail_wnd);
}
/* RFC3168, section 6.1.1.1. ECN fallback */
@@ -3363,12 +3372,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
*/
void sk_forced_mem_schedule(struct sock *sk, int size)
{
- int amt;
+ int delta, amt;
- if (size <= sk->sk_forward_alloc)
+ delta = size - sk->sk_forward_alloc;
+ if (delta <= 0)
return;
- amt = sk_mem_pages(size);
- sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ amt = sk_mem_pages(delta);
+ sk->sk_forward_alloc += amt << PAGE_SHIFT;
sk_memory_allocated_add(sk, amt);
if (mem_cgroup_sockets_enabled && sk->sk_memcg)
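The __tcp_retransmit_skb() hunks above clamp a retransmission to the receiver's remaining window instead of refusing it outright. A worked example with invented numbers: cur_mss = 1000 and segs = 4 gives len = 4000; with avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq = 2500, len is rounded down to 2000 and two segments are resent; with avail_wnd = 600 the rounddown yields 0, so len falls back to 600 and a sub-MSS segment goes out; and if the window has shrunk to zero while the skb starts at snd_una, avail_wnd is set to cur_mss so that single retransmission doubles as the zero window probe. The later collapse step is bounded the same way, by min(avail_wnd, cur_mss).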
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 50bba370486e..b4dfb82d6ecb 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -291,15 +291,13 @@ void tcp_delack_timer_handler(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- sk_mem_reclaim_partial(sk);
-
if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
- goto out;
+ return;
if (time_after(icsk->icsk_ack.timeout, jiffies)) {
sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
- goto out;
+ return;
}
icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
@@ -318,10 +316,6 @@ void tcp_delack_timer_handler(struct sock *sk)
tcp_send_ack(sk);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
}
-
-out:
- if (tcp_under_memory_pressure(sk))
- sk_mem_reclaim(sk);
}
@@ -604,11 +598,11 @@ void tcp_write_timer_handler(struct sock *sk)
if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
!icsk->icsk_pending)
- goto out;
+ return;
if (time_after(icsk->icsk_timeout, jiffies)) {
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
- goto out;
+ return;
}
tcp_mstamp_refresh(tcp_sk(sk));
@@ -630,9 +624,6 @@ void tcp_write_timer_handler(struct sock *sk)
tcp_probe_timer(sk);
break;
}
-
-out:
- sk_mem_reclaim(sk);
}
static void tcp_write_timer(struct timer_list *t)
@@ -747,8 +738,6 @@ static void tcp_keepalive_timer (struct timer_list *t)
elapsed = keepalive_time_when(tp) - elapsed;
}
- sk_mem_reclaim(sk);
-
resched:
inet_csk_reset_keepalive_timer (sk, elapsed);
goto out;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index aa9f2ec3dc46..2516078aa03e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -125,6 +125,8 @@ EXPORT_SYMBOL(sysctl_udp_mem);
atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
+DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
+EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
@@ -1461,11 +1463,11 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
sk->sk_forward_alloc += size;
- amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
+ amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
sk->sk_forward_alloc -= amt;
if (amt)
- __sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
+ __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
atomic_sub(size, &sk->sk_rmem_alloc);
@@ -1558,7 +1560,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
spin_lock(&list->lock);
if (size >= sk->sk_forward_alloc) {
amt = sk_mem_pages(size);
- delta = amt << SK_MEM_QUANTUM_SHIFT;
+ delta = amt << PAGE_SHIFT;
if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
err = -ENOBUFS;
spin_unlock(&list->lock);
@@ -1795,8 +1797,7 @@ busy_check:
}
EXPORT_SYMBOL(__skb_recv_udp);
-int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor)
+int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
int copied = 0;
@@ -1818,7 +1819,8 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
continue;
}
- used = recv_actor(desc, skb, 0, skb->len);
+ WARN_ON(!skb_set_owner_sk_safe(skb, sk));
+ used = recv_actor(sk, skb);
if (used <= 0) {
if (!copied)
copied = used;
@@ -1829,13 +1831,12 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
}
kfree_skb(skb);
- if (!desc->count)
- break;
+ break;
}
return copied;
}
-EXPORT_SYMBOL(udp_read_sock);
+EXPORT_SYMBOL(udp_read_skb);
/*
* This should be easy, if there is something there we
@@ -2946,6 +2947,8 @@ struct proto udp_prot = {
.psock_update_sk_prot = udp_bpf_update_proto,
#endif
.memory_allocated = &udp_memory_allocated,
+ .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
+
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
@@ -3263,8 +3266,8 @@ EXPORT_SYMBOL(udp_flow_hashrnd);
static void __udp_sysctl_init(struct net *net)
{
- net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
- net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;
+ net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE;
+ net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE;
#ifdef CONFIG_NET_L3_MASTER_DEV
net->ipv4.sysctl_udp_l3mdev_accept = 0;
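With SK_MEM_QUANTUM gone, the UDP receive-memory accounting above works directly in page units. A worked example for udp_rmem_release() with invented numbers and 4 KiB pages: after the freed size is added back, sk_forward_alloc = 10000 and partial = 0, so amt = 10000 & ~(PAGE_SIZE - 1) = 8192; sk_forward_alloc drops to 1808, and __sk_mem_reduce_allocated(sk, 8192 >> PAGE_SHIFT) returns exactly two pages to the protocol-wide counter while the sub-page remainder stays cached on the socket.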
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index cd1cd68adeec..6e08a76ae1e7 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -51,7 +51,10 @@ struct proto udplite_prot = {
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
+
.memory_allocated = &udp_memory_allocated,
+ .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
+
.sysctl_mem = sysctl_udp_mem,
.obj_size = sizeof(struct udp_sock),
.h.udp_table = &udplite_table,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 6fde0b184791..3d0dfa6cf9f9 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -75,7 +75,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt.rt_iif = fl4->flowi4_iif;
xdst->u.dst.dev = dev;
- dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
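Editor's note, not part of the diff: dev_hold_track()/dev_put_track() are renamed to netdev_hold()/netdev_put() throughout this series; the tracked-reference semantics are unchanged. A minimal sketch of the pattern, using a hypothetical private structure:

#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void my_priv_attach(struct my_priv *p, struct net_device *dev)
{
	p->dev = dev;
	netdev_hold(dev, &p->dev_tracker, GFP_KERNEL); /* take a tracked ref */
}

static void my_priv_detach(struct my_priv *p)
{
	netdev_put(p->dev, &p->dev_tracker); /* drop the tracked ref */
	p->dev = NULL;
}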
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 49cc6587dd77..6ed807b6c647 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -398,13 +398,13 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (ndev->cnf.forwarding)
dev_disable_lro(dev);
/* We refer to the device */
- dev_hold_track(dev, &ndev->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL);
if (snmp6_alloc_dev(ndev) < 0) {
netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
__func__);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
- dev_put_track(dev, &ndev->dev_tracker);
+ netdev_put(dev, &ndev->dev_tracker);
kfree(ndev);
return ERR_PTR(err);
}
@@ -4520,6 +4520,39 @@ restart:
/* We try to batch several events at once. */
age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+ if ((ifp->flags&IFA_F_TEMPORARY) &&
+ !(ifp->flags&IFA_F_TENTATIVE) &&
+ ifp->prefered_lft != INFINITY_LIFE_TIME &&
+ !ifp->regen_count && ifp->ifpub) {
+ /* This is a non-regenerated temporary addr. */
+
+ unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
+ ifp->idev->cnf.dad_transmits *
+ max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+
+ if (age + regen_advance >= ifp->prefered_lft) {
+ struct inet6_ifaddr *ifpub = ifp->ifpub;
+ if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
+ next = ifp->tstamp + ifp->prefered_lft * HZ;
+
+ ifp->regen_count++;
+ in6_ifa_hold(ifp);
+ in6_ifa_hold(ifpub);
+ spin_unlock(&ifp->lock);
+
+ spin_lock(&ifpub->lock);
+ ifpub->regen_count = 0;
+ spin_unlock(&ifpub->lock);
+ rcu_read_unlock_bh();
+ ipv6_create_tempaddr(ifpub, true);
+ in6_ifa_put(ifpub);
+ in6_ifa_put(ifp);
+ rcu_read_lock_bh();
+ goto restart;
+ } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
+ next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
+ }
+
if (ifp->valid_lft != INFINITY_LIFE_TIME &&
age >= ifp->valid_lft) {
spin_unlock(&ifp->lock);
@@ -4553,35 +4586,6 @@ restart:
in6_ifa_put(ifp);
goto restart;
}
- } else if ((ifp->flags&IFA_F_TEMPORARY) &&
- !(ifp->flags&IFA_F_TENTATIVE)) {
- unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
- ifp->idev->cnf.dad_transmits *
- max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
-
- if (age >= ifp->prefered_lft - regen_advance) {
- struct inet6_ifaddr *ifpub = ifp->ifpub;
- if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
- next = ifp->tstamp + ifp->prefered_lft * HZ;
- if (!ifp->regen_count && ifpub) {
- ifp->regen_count++;
- in6_ifa_hold(ifp);
- in6_ifa_hold(ifpub);
- spin_unlock(&ifp->lock);
-
- spin_lock(&ifpub->lock);
- ifpub->regen_count = 0;
- spin_unlock(&ifpub->lock);
- rcu_read_unlock_bh();
- ipv6_create_tempaddr(ifpub, true);
- in6_ifa_put(ifpub);
- in6_ifa_put(ifp);
- rcu_read_lock_bh();
- goto restart;
- }
- } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
- next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
- spin_unlock(&ifp->lock);
} else {
/* ifp->prefered_lft <= ifp->valid_lft */
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
@@ -7038,7 +7042,7 @@ static const struct ctl_table addrconf_sysctl[] = {
.data = &ipv6_devconf.accept_untracked_na,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)SYSCTL_ZERO,
- .extra2 = (void *)SYSCTL_ONE,
+ .extra2 = (void *)SYSCTL_TWO,
},
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 881d1477d24a..507a8353a6bd 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -263,7 +263,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
#endif
- dev_put_track(dev, &idev->dev_tracker);
+ netdev_put(dev, &idev->dev_tracker);
if (!idev->dead) {
pr_warn("Freeing alive inet6 device %p\n", idev);
return;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 6f354f8be2c5..2ce0c44d0081 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -63,6 +63,7 @@
#include <net/compat.h>
#include <net/xfrm.h>
#include <net/ioam6.h>
+#include <net/rawv6.h>
#include <linux/uaccess.h>
#include <linux/mroute6.h>
@@ -701,6 +702,7 @@ const struct proto_ops inet6_stream_ops = {
.sendpage_locked = tcp_sendpage_locked,
.splice_read = tcp_splice_read,
.read_sock = tcp_read_sock,
+ .read_skb = tcp_read_skb,
.peek_len = tcp_peek_len,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
@@ -726,7 +728,7 @@ const struct proto_ops inet6_dgram_ops = {
.getsockopt = sock_common_getsockopt, /* ok */
.sendmsg = inet6_sendmsg, /* retpoline's sake */
.recvmsg = inet6_recvmsg, /* retpoline's sake */
- .read_sock = udp_read_sock,
+ .read_skb = udp_read_skb,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
.set_peek_off = sk_set_peek_off,
@@ -1073,6 +1075,8 @@ static int __init inet6_init(void)
goto out;
}
+ raw_hashinfo_init(&raw_v6_hashinfo);
+
err = proto_register(&tcpv6_prot, 1);
if (err)
goto out;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index a9051df0625d..80cb50d459e4 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -398,7 +398,7 @@ static void ip6erspan_tunnel_uninit(struct net_device *dev)
ip6erspan_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
dst_cache_reset(&t->dst_cache);
- dev_put_track(dev, &t->dev_tracker);
+ netdev_put(dev, &t->dev_tracker);
}
static void ip6gre_tunnel_uninit(struct net_device *dev)
@@ -411,7 +411,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
if (ign->fb_tunnel_dev == dev)
WRITE_ONCE(ign->fb_tunnel_dev, NULL);
dst_cache_reset(&t->dst_cache);
- dev_put_track(dev, &t->dev_tracker);
+ netdev_put(dev, &t->dev_tracker);
}
@@ -701,6 +701,33 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
return 0;
}
+static int prepare_ip6gre_xmit_other(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi6 *fl6, __u8 *dsfield,
+ int *encap_limit)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ *encap_limit = t->parms.encap_limit;
+
+ memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+ *dsfield = 0;
+ else
+ *dsfield = ip6_tclass(t->parms.flowinfo);
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6->flowi6_mark = skb->mark;
+ else
+ fl6->flowi6_mark = t->parms.fwmark;
+
+ fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+ return 0;
+}
+
static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb)
{
struct ip_tunnel_info *tun_info;
@@ -868,20 +895,18 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
struct ip6_tnl *t = netdev_priv(dev);
int encap_limit = -1;
struct flowi6 fl6;
+ __u8 dsfield = 0;
__u32 mtu;
int err;
- if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- encap_limit = t->parms.encap_limit;
-
- if (!t->parms.collect_md)
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ if (!t->parms.collect_md &&
+ prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit))
+ return -1;
err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
if (err)
return err;
-
- err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
+ err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol);
return err;
}
@@ -891,6 +916,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
{
struct ip6_tnl *t = netdev_priv(dev);
struct net_device_stats *stats = &t->dev->stats;
+ __be16 payload_protocol;
int ret;
if (!pskb_inet_may_pull(skb))
@@ -899,7 +925,8 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
goto tx_err;
- switch (skb->protocol) {
+ payload_protocol = skb_protocol(skb, true);
+ switch (payload_protocol) {
case htons(ETH_P_IP):
ret = ip6gre_xmit_ipv4(skb, dev);
break;
@@ -1500,7 +1527,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
}
ip6gre_tnl_init_features(dev);
- dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
cleanup_dst_cache_init:
@@ -1892,7 +1919,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip6erspan_tnl_link_config(tunnel, 1);
- dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
cleanup_dst_cache_init:
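Editor's note, not part of the diff: ip6gre_tunnel_xmit() now dispatches on skb_protocol(skb, true) rather than skb->protocol, so a VLAN-tagged IPv4/IPv6 payload is still routed to the right handler. Minimal sketch of the helper's use, with a hypothetical wrapper name:

#include <linux/if_vlan.h>

static bool payload_is_ipv6(const struct sk_buff *skb)
{
	/* skb_protocol(skb, true) looks past any VLAN headers */
	return skb_protocol(skb, true) == htons(ETH_P_IPV6);
}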
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 77e3f5970ce4..897ca4f9b791 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1464,6 +1464,7 @@ static int __ip6_append_data(struct sock *sk,
int copy;
int err;
int offset = 0;
+ bool zc = false;
u32 tskey = 0;
struct rt6_info *rt = (struct rt6_info *)cork->dst;
struct ipv6_txoptions *opt = v6_cork->opt;
@@ -1541,17 +1542,35 @@ emsgsize:
rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
csummode = CHECKSUM_PARTIAL;
- if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
- uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
- if (!uarg)
- return -ENOBUFS;
- extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
- if (rt->dst.dev->features & NETIF_F_SG &&
- csummode == CHECKSUM_PARTIAL) {
- paged = true;
- } else {
- uarg->zerocopy = 0;
- skb_zcopy_set(skb, uarg, &extra_uref);
+ if ((flags & MSG_ZEROCOPY) && length) {
+ struct msghdr *msg = from;
+
+ if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
+ if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
+ return -EINVAL;
+
+ /* Leave uarg NULL if can't zerocopy, callers should
+ * be able to handle it.
+ */
+ if ((rt->dst.dev->features & NETIF_F_SG) &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+ zc = true;
+ uarg = msg->msg_ubuf;
+ }
+ } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
+ uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
+ if (!uarg)
+ return -ENOBUFS;
+ extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
+ if (rt->dst.dev->features & NETIF_F_SG &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+ zc = true;
+ } else {
+ uarg->zerocopy = 0;
+ skb_zcopy_set(skb, uarg, &extra_uref);
+ }
}
}
@@ -1630,9 +1649,12 @@ alloc_new_skb:
(fraglen + alloc_extra < SKB_MAX_ALLOC ||
!(rt->dst.dev->features & NETIF_F_SG)))
alloclen = fraglen;
- else {
+ else if (!zc) {
alloclen = min_t(int, fraglen, MAX_HEADER);
pagedlen = fraglen - alloclen;
+ } else {
+ alloclen = fragheaderlen + transhdrlen;
+ pagedlen = datalen - transhdrlen;
}
alloclen += alloc_extra;
@@ -1742,13 +1764,14 @@ alloc_new_skb:
err = -EFAULT;
goto error;
}
- } else if (!uarg || !uarg->zerocopy) {
+ } else if (!zc) {
int i = skb_shinfo(skb)->nr_frags;
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
goto error;
+ skb_zcopy_downgrade_managed(skb);
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
err = -EMSGSIZE;
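Editor's note, not part of the diff: a condensed restatement of the zerocopy decision added to __ip6_append_data() above. With MSG_ZEROCOPY, a caller-supplied msg->msg_ubuf is preferred over allocating a uarg via msg_zerocopy_realloc(), and true zerocopy (the new "zc" flag) is taken only when the route's device supports scatter-gather with CHECKSUM_PARTIAL; otherwise the data is copied as before. Hypothetical helper for illustration:

static bool route_can_zerocopy(const struct rt6_info *rt, int csummode)
{
	return (rt->dst.dev->features & NETIF_F_SG) &&
	       csummode == CHECKSUM_PARTIAL;
}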
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 19325b7600bb..3fda5634578c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -381,7 +381,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
else
ip6_tnl_unlink(ip6n, t);
dst_cache_reset(&t->dst_cache);
- dev_put_track(dev, &t->dev_tracker);
+ netdev_put(dev, &t->dev_tracker);
}
/**
@@ -796,7 +796,6 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
struct sk_buff *skb),
bool log_ecn_err)
{
- struct pcpu_sw_netstats *tstats;
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
int err;
@@ -856,11 +855,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
}
}
- tstats = this_cpu_ptr(tunnel->dev->tstats);
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += skb->len;
- u64_stats_update_end(&tstats->syncp);
+ dev_sw_netstats_rx_add(tunnel->dev, skb->len);
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
@@ -1085,10 +1080,13 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
unsigned int max_headroom = psh_hlen;
+ __be16 payload_protocol;
bool use_cache = false;
u8 hop_limit;
int err = -1;
+ payload_protocol = skb_protocol(skb, true);
+
if (t->parms.collect_md) {
hop_limit = skb_tunnel_info(skb)->key.ttl;
goto route_lookup;
@@ -1098,7 +1096,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
/* NBMA tunnel */
if (ipv6_addr_any(&t->parms.raddr)) {
- if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (payload_protocol == htons(ETH_P_IPV6)) {
struct in6_addr *addr6;
struct neighbour *neigh;
int addr_type;
@@ -1119,7 +1117,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
neigh_release(neigh);
- } else if (skb->protocol == htons(ETH_P_IP)) {
+ } else if (payload_protocol == htons(ETH_P_IP)) {
const struct rtable *rt = skb_rtable(skb);
if (!rt)
@@ -1230,9 +1228,9 @@ route_lookup:
skb_dst_set(skb, dst);
if (hop_limit == 0) {
- if (skb->protocol == htons(ETH_P_IP))
+ if (payload_protocol == htons(ETH_P_IP))
hop_limit = ip_hdr(skb)->ttl;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
hop_limit = ipv6_hdr(skb)->hop_limit;
else
hop_limit = ip6_dst_hoplimit(dst);
@@ -1889,7 +1887,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
- dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
return 0;
destroy_dst:
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 3a434d75925c..8fe59a79e800 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -293,7 +293,7 @@ static void vti6_dev_uninit(struct net_device *dev)
RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
else
vti6_tnl_unlink(ip6n, t);
- dev_put_track(dev, &t->dev_tracker);
+ netdev_put(dev, &t->dev_tracker);
}
static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -936,7 +936,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
return 0;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 4e74bc61a3db..d546fc09d803 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -62,7 +62,12 @@ struct ip6mr_result {
Note that the changes are semaphored via rtnl_lock.
*/
-static DEFINE_RWLOCK(mrt_lock);
+static DEFINE_SPINLOCK(mrt_lock);
+
+static struct net_device *vif_dev_read(const struct vif_device *vif)
+{
+ return rcu_dereference(vif->dev);
+}
/* Multicast router control variables */
@@ -85,11 +90,13 @@ static void ip6mr_free_table(struct mr_table *mrt);
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
struct net_device *dev, struct sk_buff *skb,
struct mfc6_cache *cache);
-static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
+static int ip6mr_cache_report(const struct mr_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
int cmd);
-static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
+static void mrt6msg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
+static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
@@ -398,7 +405,7 @@ static void ip6mr_free_table(struct mr_table *mrt)
*/
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(mrt_lock)
+ __acquires(RCU)
{
struct mr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
@@ -410,14 +417,14 @@ static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
iter->mrt = mrt;
- read_lock(&mrt_lock);
+ rcu_read_lock();
return mr_vif_seq_start(seq, pos);
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
- __releases(mrt_lock)
+ __releases(RCU)
{
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
@@ -430,7 +437,11 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
"Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
} else {
const struct vif_device *vif = v;
- const char *name = vif->dev ? vif->dev->name : "none";
+ const struct net_device *vif_dev;
+ const char *name;
+
+ vif_dev = vif_dev_read(vif);
+ name = vif_dev ? vif_dev->name : "none";
seq_printf(seq,
"%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
@@ -549,13 +560,11 @@ static int pim6_rcv(struct sk_buff *skb)
if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
goto drop;
- reg_vif_num = mrt->mroute_reg_vif_num;
- read_lock(&mrt_lock);
+ /* Pairs with WRITE_ONCE() in mif6_add()/mif6_delete() */
+ reg_vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
if (reg_vif_num >= 0)
- reg_dev = mrt->vif_table[reg_vif_num].dev;
- dev_hold(reg_dev);
- read_unlock(&mrt_lock);
+ reg_dev = vif_dev_read(&mrt->vif_table[reg_vif_num]);
if (!reg_dev)
goto drop;
@@ -570,7 +579,6 @@ static int pim6_rcv(struct sk_buff *skb)
netif_rx(skb);
- dev_put(reg_dev);
return 0;
drop:
kfree_skb(skb);
@@ -600,11 +608,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
goto tx_err;
- read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
- ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
- read_unlock(&mrt_lock);
+ rcu_read_lock();
+ ip6mr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
+ MRT6MSG_WHOLEPKT);
+ rcu_read_unlock();
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -670,10 +679,11 @@ failure:
static int call_ip6mr_vif_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
mifi_t vif_index, u32 tb_id)
{
return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
- vif, vif_index, tb_id,
+ vif, vif_dev, vif_index, tb_id,
&net->ipv6.ipmr_seq);
}
@@ -698,23 +708,21 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
v = &mrt->vif_table[vifi];
- if (VIF_EXISTS(mrt, vifi))
- call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
- FIB_EVENT_VIF_DEL, v, vifi,
- mrt->id);
-
- write_lock_bh(&mrt_lock);
- dev = v->dev;
- v->dev = NULL;
-
- if (!dev) {
- write_unlock_bh(&mrt_lock);
+ dev = rtnl_dereference(v->dev);
+ if (!dev)
return -EADDRNOTAVAIL;
- }
+
+ call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
+ FIB_EVENT_VIF_DEL, v, dev,
+ vifi, mrt->id);
+ spin_lock(&mrt_lock);
+ RCU_INIT_POINTER(v->dev, NULL);
#ifdef CONFIG_IPV6_PIMSM_V2
- if (vifi == mrt->mroute_reg_vif_num)
- mrt->mroute_reg_vif_num = -1;
+ if (vifi == mrt->mroute_reg_vif_num) {
+ /* Pairs with READ_ONCE() in ip6mr_cache_report() and reg_vif_xmit() */
+ WRITE_ONCE(mrt->mroute_reg_vif_num, -1);
+ }
#endif
if (vifi + 1 == mrt->maxvif) {
@@ -723,10 +731,10 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
if (VIF_EXISTS(mrt, tmp))
break;
}
- mrt->maxvif = tmp + 1;
+ WRITE_ONCE(mrt->maxvif, tmp + 1);
}
- write_unlock_bh(&mrt_lock);
+ spin_unlock(&mrt_lock);
dev_set_allmulti(dev, -1);
@@ -741,7 +749,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
if ((v->flags & MIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
- dev_put_track(dev, &v->dev_tracker);
+ netdev_put(dev, &v->dev_tracker);
return 0;
}
@@ -826,7 +834,7 @@ static void ipmr_expire_process(struct timer_list *t)
spin_unlock(&mfc_unres_lock);
}
-/* Fill oifs list. It is called under write locked mrt_lock. */
+/* Fill oifs list. It is called under locked mrt_lock. */
static void ip6mr_update_thresholds(struct mr_table *mrt,
struct mr_mfc *cache,
@@ -912,18 +920,18 @@ static int mif6_add(struct net *net, struct mr_table *mrt,
MIFF_REGISTER);
/* And finish update writing critical data */
- write_lock_bh(&mrt_lock);
- v->dev = dev;
+ spin_lock(&mrt_lock);
+ rcu_assign_pointer(v->dev, dev);
netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
#ifdef CONFIG_IPV6_PIMSM_V2
if (v->flags & MIFF_REGISTER)
- mrt->mroute_reg_vif_num = vifi;
+ WRITE_ONCE(mrt->mroute_reg_vif_num, vifi);
#endif
if (vifi + 1 > mrt->maxvif)
- mrt->maxvif = vifi + 1;
- write_unlock_bh(&mrt_lock);
+ WRITE_ONCE(mrt->maxvif, vifi + 1);
+ spin_unlock(&mrt_lock);
call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
- v, vifi, mrt->id);
+ v, dev, vifi, mrt->id);
return 0;
}
@@ -1028,10 +1036,10 @@ static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
/*
* Bounce a cache query up to pim6sd and netlink.
*
- * Called under mrt_lock.
+ * Called under rcu_read_lock()
*/
-static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
+static int ip6mr_cache_report(const struct mr_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert)
{
struct sock *mroute6_sk;
@@ -1072,7 +1080,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
if (assert == MRT6MSG_WRMIFWHOLE)
msg->im6_mif = mifi;
else
- msg->im6_mif = mrt->mroute_reg_vif_num;
+ msg->im6_mif = READ_ONCE(mrt->mroute_reg_vif_num);
msg->im6_pad = 0;
msg->im6_src = ipv6_hdr(pkt)->saddr;
msg->im6_dst = ipv6_hdr(pkt)->daddr;
@@ -1107,10 +1115,8 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- rcu_read_lock();
mroute6_sk = rcu_dereference(mrt->mroute_sk);
if (!mroute6_sk) {
- rcu_read_unlock();
kfree_skb(skb);
return -EINVAL;
}
@@ -1119,7 +1125,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
/* Deliver to user space multicast routing algorithms */
ret = sock_queue_rcv_skb(mroute6_sk, skb);
- rcu_read_unlock();
+
if (ret < 0) {
net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
kfree_skb(skb);
@@ -1243,7 +1249,7 @@ static int ip6mr_device_event(struct notifier_block *this,
ip6mr_for_each_table(mrt, net) {
v = &mrt->vif_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
- if (v->dev == dev)
+ if (rcu_access_pointer(v->dev) == dev)
mif6_delete(mrt, ct, 1, NULL);
}
}
@@ -1262,7 +1268,7 @@ static int ip6mr_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
- ip6mr_mr_table_iter, &mrt_lock, extack);
+ ip6mr_mr_table_iter, extack);
}
static struct notifier_block ip6_mr_notifier = {
@@ -1386,7 +1392,7 @@ int __init ip6_mr_init(void)
}
#endif
err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
- NULL, ip6mr_rtm_dumproute, 0);
+ ip6mr_rtm_getroute, ip6mr_rtm_dumproute, 0);
if (err == 0)
return 0;
@@ -1437,12 +1443,12 @@ static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
&mfc->mf6cc_mcastgrp.sin6_addr, parent);
rcu_read_unlock();
if (c) {
- write_lock_bh(&mrt_lock);
+ spin_lock(&mrt_lock);
c->_c.mfc_parent = mfc->mf6cc_parent;
ip6mr_update_thresholds(mrt, &c->_c, ttls);
if (!mrtsock)
c->_c.mfc_flags |= MFC_STATIC;
- write_unlock_bh(&mrt_lock);
+ spin_unlock(&mrt_lock);
call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
c, mrt->id);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
@@ -1560,7 +1566,7 @@ static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
struct net *net = sock_net(sk);
rtnl_lock();
- write_lock_bh(&mrt_lock);
+ spin_lock(&mrt_lock);
if (rtnl_dereference(mrt->mroute_sk)) {
err = -EADDRINUSE;
} else {
@@ -1568,7 +1574,7 @@ static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
sock_set_flag(sk, SOCK_RCU_FREE);
atomic_inc(&net->ipv6.devconf_all->mc_forwarding);
}
- write_unlock_bh(&mrt_lock);
+ spin_unlock(&mrt_lock);
if (!err)
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
@@ -1598,14 +1604,14 @@ int ip6mr_sk_done(struct sock *sk)
rtnl_lock();
ip6mr_for_each_table(mrt, net) {
if (sk == rtnl_dereference(mrt->mroute_sk)) {
- write_lock_bh(&mrt_lock);
+ spin_lock(&mrt_lock);
RCU_INIT_POINTER(mrt->mroute_sk, NULL);
/* Note that mroute_sk had SOCK_RCU_FREE set,
* so the RCU grace period before sk freeing
* is guaranteed by sk_destruct()
*/
atomic_dec(&devconf->mc_forwarding);
- write_unlock_bh(&mrt_lock);
+ spin_unlock(&mrt_lock);
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_MC_FORWARDING,
NETCONFA_IFINDEX_ALL,
@@ -1891,20 +1897,20 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
- read_lock(&mrt_lock);
+ rcu_read_lock();
vif = &mrt->vif_table[vr.mifi];
if (VIF_EXISTS(mrt, vr.mifi)) {
- vr.icount = vif->pkt_in;
- vr.ocount = vif->pkt_out;
- vr.ibytes = vif->bytes_in;
- vr.obytes = vif->bytes_out;
- read_unlock(&mrt_lock);
+ vr.icount = READ_ONCE(vif->pkt_in);
+ vr.ocount = READ_ONCE(vif->pkt_out);
+ vr.ibytes = READ_ONCE(vif->bytes_in);
+ vr.obytes = READ_ONCE(vif->bytes_out);
+ rcu_read_unlock();
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EADDRNOTAVAIL;
case SIOCGETSGCNT_IN6:
if (copy_from_user(&sr, arg, sizeof(sr)))
@@ -1966,20 +1972,20 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
- read_lock(&mrt_lock);
+ rcu_read_lock();
vif = &mrt->vif_table[vr.mifi];
if (VIF_EXISTS(mrt, vr.mifi)) {
- vr.icount = vif->pkt_in;
- vr.ocount = vif->pkt_out;
- vr.ibytes = vif->bytes_in;
- vr.obytes = vif->bytes_out;
- read_unlock(&mrt_lock);
+ vr.icount = READ_ONCE(vif->pkt_in);
+ vr.ocount = READ_ONCE(vif->pkt_out);
+ vr.ibytes = READ_ONCE(vif->bytes_in);
+ vr.obytes = READ_ONCE(vif->bytes_out);
+ rcu_read_unlock();
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EADDRNOTAVAIL;
case SIOCGETSGCNT_IN6:
if (copy_from_user(&sr, arg, sizeof(sr)))
@@ -2021,21 +2027,22 @@ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct
static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, int vifi)
{
- struct ipv6hdr *ipv6h;
struct vif_device *vif = &mrt->vif_table[vifi];
- struct net_device *dev;
+ struct net_device *vif_dev;
+ struct ipv6hdr *ipv6h;
struct dst_entry *dst;
struct flowi6 fl6;
- if (!vif->dev)
+ vif_dev = vif_dev_read(vif);
+ if (!vif_dev)
goto out_free;
#ifdef CONFIG_IPV6_PIMSM_V2
if (vif->flags & MIFF_REGISTER) {
- vif->pkt_out++;
- vif->bytes_out += skb->len;
- vif->dev->stats.tx_bytes += skb->len;
- vif->dev->stats.tx_packets++;
+ WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
+ WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
+ vif_dev->stats.tx_bytes += skb->len;
+ vif_dev->stats.tx_packets++;
ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
goto out_free;
}
@@ -2068,14 +2075,13 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
* not mrouter) cannot join to more than one interface - it will
* result in receiving multiple packets.
*/
- dev = vif->dev;
- skb->dev = dev;
- vif->pkt_out++;
- vif->bytes_out += skb->len;
+ skb->dev = vif_dev;
+ WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
+ WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
/* We are about to write */
/* XXX: extension headers? */
- if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
+ if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(vif_dev)))
goto out_free;
ipv6h = ipv6_hdr(skb);
@@ -2084,7 +2090,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
- net, NULL, skb, skb->dev, dev,
+ net, NULL, skb, skb->dev, vif_dev,
ip6mr_forward2_finish);
out_free:
@@ -2092,17 +2098,20 @@ out_free:
return 0;
}
+/* Called with rcu_read_lock() */
static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
int ct;
- for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
- if (mrt->vif_table[ct].dev == dev)
+ /* Pairs with WRITE_ONCE() in mif6_delete()/mif6_add() */
+ for (ct = READ_ONCE(mrt->maxvif) - 1; ct >= 0; ct--) {
+ if (rcu_access_pointer(mrt->vif_table[ct].dev) == dev)
break;
}
return ct;
}
+/* Called under rcu_read_lock() */
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
struct net_device *dev, struct sk_buff *skb,
struct mfc6_cache *c)
@@ -2122,20 +2131,18 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
/* For an (*,G) entry, we only check that the incoming
* interface is part of the static tree.
*/
- rcu_read_lock();
cache_proxy = mr_mfc_find_any_parent(mrt, vif);
if (cache_proxy &&
cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
rcu_read_unlock();
goto forward;
}
- rcu_read_unlock();
}
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
- if (mrt->vif_table[vif].dev != dev) {
+ if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
c->_c.mfc_un.res.wrong_if++;
if (true_vifi >= 0 && mrt->mroute_do_assert &&
@@ -2159,8 +2166,10 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
}
forward:
- mrt->vif_table[vif].pkt_in++;
- mrt->vif_table[vif].bytes_in += skb->len;
+ WRITE_ONCE(mrt->vif_table[vif].pkt_in,
+ mrt->vif_table[vif].pkt_in + 1);
+ WRITE_ONCE(mrt->vif_table[vif].bytes_in,
+ mrt->vif_table[vif].bytes_in + skb->len);
/*
* Forward the frame
@@ -2238,7 +2247,6 @@ int ip6_mr_input(struct sk_buff *skb)
return err;
}
- read_lock(&mrt_lock);
cache = ip6mr_cache_find(mrt,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
if (!cache) {
@@ -2259,19 +2267,15 @@ int ip6_mr_input(struct sk_buff *skb)
vif = ip6mr_find_vif(mrt, dev);
if (vif >= 0) {
int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
- read_unlock(&mrt_lock);
return err;
}
- read_unlock(&mrt_lock);
kfree_skb(skb);
return -ENODEV;
}
ip6_mr_forward(net, mrt, dev, skb, cache);
- read_unlock(&mrt_lock);
-
return 0;
}
@@ -2287,7 +2291,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
if (!mrt)
return -ENOENT;
- read_lock(&mrt_lock);
+ rcu_read_lock();
cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
if (!cache && skb->dev) {
int vif = ip6mr_find_vif(mrt, skb->dev);
@@ -2305,14 +2309,14 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
dev = skb->dev;
if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -ENODEV;
}
/* really correct? */
skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
if (!skb2) {
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -ENOMEM;
}
@@ -2335,13 +2339,13 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
iph->daddr = rt->rt6i_dst.addr;
err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return err;
}
err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return err;
}
@@ -2460,7 +2464,7 @@ static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
return len;
}
-static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
+static void mrt6msg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
{
struct net *net = read_pnet(&mrt->net);
struct nlmsghdr *nlh;
@@ -2508,6 +2512,95 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
+static const struct nla_policy ip6mr_getroute_policy[RTA_MAX + 1] = {
+ [RTA_SRC] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+ [RTA_DST] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+ [RTA_TABLE] = { .type = NLA_U32 },
+};
+
+static int ip6mr_rtm_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, ip6mr_getroute_policy,
+ extack);
+ if (err)
+ return err;
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
+ (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
+ rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
+ rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid values in header for multicast route get request");
+ return -EINVAL;
+ }
+
+ if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
+ (tb[RTA_DST] && !rtm->rtm_dst_len)) {
+ NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(in_skb->sk);
+ struct in6_addr src = {}, grp = {};
+ struct nlattr *tb[RTA_MAX + 1];
+ struct mfc6_cache *cache;
+ struct mr_table *mrt;
+ struct sk_buff *skb;
+ u32 tableid;
+ int err;
+
+ err = ip6mr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
+ if (err < 0)
+ return err;
+
+ if (tb[RTA_SRC])
+ src = nla_get_in6_addr(tb[RTA_SRC]);
+ if (tb[RTA_DST])
+ grp = nla_get_in6_addr(tb[RTA_DST]);
+ tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
+
+ mrt = ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT);
+ if (!mrt) {
+ NL_SET_ERR_MSG_MOD(extack, "MR table does not exist");
+ return -ENOENT;
+ }
+
+ /* entries are added/deleted only under RTNL */
+ rcu_read_lock();
+ cache = ip6mr_cache_find(mrt, &src, &grp);
+ rcu_read_unlock();
+ if (!cache) {
+ NL_SET_ERR_MSG_MOD(extack, "MR cache entry not found");
+ return -ENOENT;
+ }
+
+ skb = nlmsg_new(mr6_msgsize(false, mrt->maxvif), GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ err = ip6mr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, cache, RTM_NEWROUTE, 0);
+ if (err < 0) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
+}
+
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
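Editor's note, not part of the diff: the ip6mr changes above drop the global rwlock in favour of a spinlock for writers plus RCU (rcu_dereference()/rcu_access_pointer()) and READ_ONCE()/WRITE_ONCE() for lockless readers of vif->dev, maxvif and the per-vif counters. Generic shape of that pattern, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct vif_slot {
	struct net_device __rcu *dev;
};

static DEFINE_SPINLOCK(writer_lock);

static void slot_attach(struct vif_slot *v, struct net_device *dev)
{
	spin_lock(&writer_lock);
	rcu_assign_pointer(v->dev, dev); /* publish for RCU readers */
	spin_unlock(&writer_lock);
}

static bool slot_uses_dev(struct vif_slot *v, const struct net_device *dev)
{
	bool match;

	rcu_read_lock();
	match = rcu_access_pointer(v->dev) == dev; /* pointer compare only */
	rcu_read_unlock();
	return match;
}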
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index b0dfe97ea4ee..98453693e400 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -128,6 +128,7 @@ struct neigh_table nd_tbl = {
[NEIGH_VAR_RETRANS_TIME] = ND_RETRANS_TIMER,
[NEIGH_VAR_BASE_REACHABLE_TIME] = ND_REACHABLE_TIME,
[NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
+ [NEIGH_VAR_INTERVAL_PROBE_TIME_MS] = 5 * HZ,
[NEIGH_VAR_GC_STALETIME] = 60 * HZ,
[NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX,
[NEIGH_VAR_PROXY_QLEN] = 64,
@@ -966,6 +967,25 @@ out:
in6_dev_put(idev);
}
+static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
+{
+ struct inet6_dev *idev = __in6_dev_get(dev);
+
+ switch (idev->cnf.accept_untracked_na) {
+ case 0: /* Don't accept untracked na (absent in neighbor cache) */
+ return 0;
+ case 1: /* Create new entries from na if currently untracked */
+ return 1;
+ case 2: /* Create new entries from untracked na only if saddr is in the
+ * same subnet as an address configured on the interface that
+ * received the na
+ */
+ return !!ipv6_chk_prefix(saddr, dev);
+ default:
+ return 0;
+ }
+}
+
static void ndisc_recv_na(struct sk_buff *skb)
{
struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
@@ -1060,11 +1080,11 @@ static void ndisc_recv_na(struct sk_buff *skb)
* Note that we don't do a (daddr == all-routers-mcast) check.
*/
new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
- if (!neigh && lladdr &&
- idev && idev->cnf.forwarding &&
- idev->cnf.accept_untracked_na) {
- neigh = neigh_create(&nd_tbl, &msg->target, dev);
- new_state = NUD_STALE;
+ if (!neigh && lladdr && idev && idev->cnf.forwarding) {
+ if (accept_untracked_na(dev, saddr)) {
+ neigh = neigh_create(&nd_tbl, &msg->target, dev);
+ new_state = NUD_STALE;
+ }
}
if (neigh && !IS_ERR(neigh)) {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 3b7cbd522b54..722de9dd0ff7 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -61,46 +61,30 @@
#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */
-struct raw_hashinfo raw_v6_hashinfo = {
- .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
-};
+struct raw_hashinfo raw_v6_hashinfo;
EXPORT_SYMBOL_GPL(raw_v6_hashinfo);
-struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
- unsigned short num, const struct in6_addr *loc_addr,
- const struct in6_addr *rmt_addr, int dif, int sdif)
+bool raw_v6_match(struct net *net, struct sock *sk, unsigned short num,
+ const struct in6_addr *loc_addr,
+ const struct in6_addr *rmt_addr, int dif, int sdif)
{
- bool is_multicast = ipv6_addr_is_multicast(loc_addr);
-
- sk_for_each_from(sk)
- if (inet_sk(sk)->inet_num == num) {
-
- if (!net_eq(sock_net(sk), net))
- continue;
-
- if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
- !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
- continue;
-
- if (!raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
- dif, sdif))
- continue;
-
- if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
- if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
- goto found;
- if (is_multicast &&
- inet6_mc_check(sk, loc_addr, rmt_addr))
- goto found;
- continue;
- }
- goto found;
- }
- sk = NULL;
-found:
- return sk;
+ if (inet_sk(sk)->inet_num != num ||
+ !net_eq(sock_net(sk), net) ||
+ (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+ !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
+ !raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
+ dif, sdif))
+ return false;
+
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
+ ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr) ||
+ (ipv6_addr_is_multicast(loc_addr) &&
+ inet6_mc_check(sk, loc_addr, rmt_addr)))
+ return true;
+
+ return false;
}
-EXPORT_SYMBOL_GPL(__raw_v6_lookup);
+EXPORT_SYMBOL_GPL(raw_v6_match);
/*
* 0 - deliver
@@ -156,31 +140,27 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
*/
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
+ struct net *net = dev_net(skb->dev);
+ struct hlist_nulls_head *hlist;
+ struct hlist_nulls_node *hnode;
const struct in6_addr *saddr;
const struct in6_addr *daddr;
struct sock *sk;
bool delivered = false;
__u8 hash;
- struct net *net;
saddr = &ipv6_hdr(skb)->saddr;
daddr = saddr + 1;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
-
- read_lock(&raw_v6_hashinfo.lock);
- sk = sk_head(&raw_v6_hashinfo.ht[hash]);
-
- if (!sk)
- goto out;
-
- net = dev_net(skb->dev);
- sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr,
- inet6_iif(skb), inet6_sdif(skb));
-
- while (sk) {
+ hlist = &raw_v6_hashinfo.ht[hash];
+ rcu_read_lock();
+ sk_nulls_for_each(sk, hnode, hlist) {
int filtered;
+ if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
+ inet6_iif(skb), inet6_sdif(skb)))
+ continue;
delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
@@ -219,23 +199,14 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
rawv6_rcv(sk, clone);
}
}
- sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
- inet6_iif(skb), inet6_sdif(skb));
}
-out:
- read_unlock(&raw_v6_hashinfo.lock);
+ rcu_read_unlock();
return delivered;
}
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
- struct sock *raw_sk;
-
- raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
- if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
- raw_sk = NULL;
-
- return raw_sk != NULL;
+ return ipv6_raw_deliver(skb, nexthdr);
}
/* This cleans up af_inet6 a bit. -DaveM */
@@ -361,30 +332,25 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
u8 type, u8 code, int inner_offset, __be32 info)
{
+ struct net *net = dev_net(skb->dev);
+ struct hlist_nulls_head *hlist;
+ struct hlist_nulls_node *hnode;
struct sock *sk;
int hash;
- const struct in6_addr *saddr, *daddr;
- struct net *net;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
-
- read_lock(&raw_v6_hashinfo.lock);
- sk = sk_head(&raw_v6_hashinfo.ht[hash]);
- if (sk) {
+ hlist = &raw_v6_hashinfo.ht[hash];
+ rcu_read_lock();
+ sk_nulls_for_each(sk, hnode, hlist) {
/* Note: ipv6_hdr(skb) != skb->data */
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
- saddr = &ip6h->saddr;
- daddr = &ip6h->daddr;
- net = dev_net(skb->dev);
-
- while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
- inet6_iif(skb), inet6_iif(skb)))) {
- rawv6_err(sk, skb, NULL, type, code,
- inner_offset, info);
- sk = sk_next(sk);
- }
+
+ if (!raw_v6_match(net, sk, nexthdr, &ip6h->saddr, &ip6h->daddr,
+ inet6_iif(skb), inet6_iif(skb)))
+ continue;
+ rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
}
- read_unlock(&raw_v6_hashinfo.lock);
+ rcu_read_unlock();
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
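Editor's note, not part of the diff: raw socket delivery above switches from a rwlock-protected walk via __raw_v6_lookup() to an RCU hlist_nulls traversal with a per-socket predicate (raw_v6_match()). Generic shape of that read side, with a hypothetical match callback:

#include <net/sock.h>

static bool deliver_to_matching(struct hlist_nulls_head *hlist,
				bool (*my_match)(struct sock *sk))
{
	struct hlist_nulls_node *hnode;
	bool delivered = false;
	struct sock *sk;

	rcu_read_lock();
	sk_nulls_for_each(sk, hnode, hlist) {
		if (!my_match(sk))
			continue;
		delivered = true;
		/* clone and queue the skb to sk here */
	}
	rcu_read_unlock();
	return delivered;
}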
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 916417944ec8..69252eb462b2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -182,9 +182,9 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
if (rt_dev == dev) {
rt->dst.dev = blackhole_netdev;
- dev_replace_track(rt_dev, blackhole_netdev,
- &rt->dst.dev_tracker,
- GFP_ATOMIC);
+ netdev_ref_replace(rt_dev, blackhole_netdev,
+ &rt->dst.dev_tracker,
+ GFP_ATOMIC);
handled = true;
}
if (handled)
@@ -607,7 +607,7 @@ static void rt6_probe_deferred(struct work_struct *w)
addrconf_addr_solict_mult(&work->target, &mcaddr);
ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
- dev_put_track(work->dev, &work->dev_tracker);
+ netdev_put(work->dev, &work->dev_tracker);
kfree(work);
}
@@ -661,7 +661,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
} else {
INIT_WORK(&work->work, rt6_probe_deferred);
work->target = *nh_gw;
- dev_hold_track(dev, &work->dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
work->dev = dev;
schedule_work(&work->work);
}
@@ -5941,7 +5941,7 @@ int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
rcu_read_unlock();
if (err)
- return count += w.count;
+ return count + w.count;
}
return -1;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 6bcd5e419a08..6b73b7a5f175 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -519,7 +519,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
ipip6_tunnel_del_prl(tunnel, NULL);
}
dst_cache_reset(&tunnel->dst_cache);
- dev_put_track(dev, &tunnel->dev_tracker);
+ netdev_put(dev, &tunnel->dev_tracker);
}
static int ipip6_err(struct sk_buff *skb, u32 info)
@@ -684,8 +684,6 @@ static int ipip6_rcv(struct sk_buff *skb)
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr, sifindex);
if (tunnel) {
- struct pcpu_sw_netstats *tstats;
-
if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
tunnel->parms.iph.protocol != 0)
goto out;
@@ -722,11 +720,7 @@ static int ipip6_rcv(struct sk_buff *skb)
}
}
- tstats = this_cpu_ptr(tunnel->dev->tstats);
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += skb->len;
- u64_stats_update_end(&tstats->syncp);
+ dev_sw_netstats_rx_add(tunnel->dev, skb->len);
netif_rx(skb);
@@ -1461,7 +1455,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
dev->tstats = NULL;
return err;
}
- dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9d3ede293258..85b8b765dcb1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -952,7 +952,10 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
* Underlying function will use this to retrieve the network
* namespace
*/
- dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
+ if (sk && sk->sk_state != TCP_TIME_WAIT)
+ dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
+ else
+ dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
@@ -2159,7 +2162,10 @@ struct proto tcpv6_prot = {
.leave_memory_pressure = tcp_leave_memory_pressure,
.stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
+
.memory_allocated = &tcp_memory_allocated,
+ .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
+
.memory_pressure = &tcp_memory_pressure,
.orphan_count = &tcp_orphan_count,
.sysctl_mem = sysctl_tcp_mem,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e2f2e087a753..16c176e7c69a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1735,7 +1735,10 @@ struct proto udpv6_prot = {
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = udp_bpf_update_proto,
#endif
+
.memory_allocated = &udp_memory_allocated,
+ .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
+
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index fbb700d3f437..b70725856259 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -48,7 +48,10 @@ struct proto udplitev6_prot = {
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
+
.memory_allocated = &udp_memory_allocated,
+ .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
+
.sysctl_mem = sysctl_udp_mem,
.obj_size = sizeof(struct udp6_sock),
.h.udp_table = &udplite_table,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index e64e427a51cf..4a4b0e49ec92 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -73,11 +73,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
struct rt6_info *rt = (struct rt6_info *)xdst->route;
xdst->u.dst.dev = dev;
- dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
if (!xdst->u.rt6.rt6i_idev) {
- dev_put_track(dev, &xdst->u.dst.dev_tracker);
+ netdev_put(dev, &xdst->u.dst.dev_tracker);
return -ENODEV;
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a0385ddbffcf..498a0c35b7bb 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -278,8 +278,6 @@ static void iucv_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
- sk_mem_reclaim(sk);
-
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive iucv socket %p\n", sk);
return;
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 9d1aafe75f92..4595b56d175d 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -184,7 +184,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
session->pwtype == L2TP_PWTYPE_PPP ? "PPP" :
"");
if (session->send_seq || session->recv_seq)
- seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns);
+ seq_printf(m, " nr %u, ns %u\n", session->nr, session->ns);
seq_printf(m, " refcnt %d\n", refcount_read(&session->ref_count));
seq_printf(m, " config 0/0/%c/%c/-/%s %08x %u\n",
session->recv_seq ? 'R' : '-',
@@ -192,7 +192,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
session->lns_mode ? "LNS" : "LAC",
0,
jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " offset 0 l2specific %hu/%hu\n",
+ seq_printf(m, " offset 0 l2specific %hu/%d\n",
session->l2specific_type, l2tp_get_l2specific_len(session));
if (session->cookie_len) {
seq_printf(m, " cookie %02x%02x%02x%02x",
@@ -215,7 +215,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
seq_puts(m, "\n");
}
- seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
+ seq_printf(m, " %u/%u tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
session->nr, session->ns,
atomic_long_read(&session->stats.tx_packets),
atomic_long_read(&session->stats.tx_bytes),
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8be1fdc68a0b..db2e584c625e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1553,7 +1553,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
session->lns_mode ? "LNS" : "LAC",
0,
jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
+ seq_printf(m, " %u/%u %ld/%ld/%ld %ld/%ld/%ld\n",
session->nr, session->ns,
atomic_long_read(&session->stats.tx_packets),
atomic_long_read(&session->stats.tx_bytes),
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7f555d2e5357..da7fe94bea2e 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -224,7 +224,7 @@ static int llc_ui_release(struct socket *sock)
} else {
release_sock(sk);
}
- dev_put_track(llc->dev, &llc->dev_tracker);
+ netdev_put(llc->dev, &llc->dev_tracker);
sock_put(sk);
llc_sk_free(sk);
out:
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index bfab39320004..b7c50646063d 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
/**
@@ -242,7 +242,7 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 91878ed5ec46..b13f4b7b740d 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -82,7 +82,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index 4bab1683652d..2e66598fac79 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <net/mac80211.h>
@@ -637,7 +637,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
len += 38; /* Ethernet header length */
- conf = rcu_dereference(vif->chanctx_conf);
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (conf) {
band = conf->def.chan->band;
shift = ieee80211_chandef_get_shift(&conf->def);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4ddf297f40f2..64801ab545c1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -39,7 +39,8 @@ static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata,
memcpy(sdata->vif.bss_conf.mu_group.position,
params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN,
WLAN_USER_POSITION_LEN);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS);
+ ieee80211_link_info_change_notify(sdata, 0,
+ BSS_CHANGED_MU_GROUPS);
/* don't care about endianness - just check for 0 */
memcpy(&membership, params->vht_mumimo_groups,
WLAN_MEMBERSHIP_LEN);
@@ -53,7 +54,7 @@ static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata,
params->vht_mumimo_follow_addr);
}
- sdata->vif.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow;
+ sdata->vif.bss_conf.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow;
}
static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
@@ -113,14 +114,15 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
}
static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_mbssid_config params)
+ struct cfg80211_mbssid_config params,
+ struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_sub_if_data *tx_sdata;
sdata->vif.mbssid_tx_vif = NULL;
- sdata->vif.bss_conf.bssid_index = 0;
- sdata->vif.bss_conf.nontransmitted = false;
- sdata->vif.bss_conf.ema_ap = false;
+ link_conf->bssid_index = 0;
+ link_conf->nontransmitted = false;
+ link_conf->ema_ap = false;
if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev)
return -EINVAL;
@@ -133,11 +135,11 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
sdata->vif.mbssid_tx_vif = &sdata->vif;
} else {
sdata->vif.mbssid_tx_vif = &tx_sdata->vif;
- sdata->vif.bss_conf.nontransmitted = true;
- sdata->vif.bss_conf.bssid_index = params.index;
+ link_conf->nontransmitted = true;
+ link_conf->bssid_index = params.index;
}
if (params.ema)
- sdata->vif.bss_conf.ema_ap = true;
+ link_conf->ema_ap = true;
return 0;
}
@@ -205,7 +207,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
return 0;
mutex_lock(&local->sta_mtx);
- sta = sta_info_get(sdata, ifmgd->bssid);
+ sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
if (sta)
drv_sta_set_4addr(local, sdata, &sta->sta,
params->use_4addr);
@@ -438,7 +440,6 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta = NULL;
- const struct ieee80211_cipher_scheme *cs = NULL;
struct ieee80211_key *key;
int err;
@@ -456,23 +457,12 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
if (WARN_ON_ONCE(fips_enabled))
return -EINVAL;
break;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_CCMP_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- break;
default:
- cs = ieee80211_cs_get(local, params->cipher, sdata->vif.type);
break;
}
key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len,
- params->key, params->seq_len, params->seq,
- cs);
+ params->key, params->seq_len, params->seq);
if (IS_ERR(key))
return PTR_ERR(key);
@@ -537,9 +527,6 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
break;
}
- if (sta)
- sta->cipher_scheme = cs;
-
err = ieee80211_key_link(key, sdata, sta);
out_unlock:
@@ -548,33 +535,56 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
return err;
}
+static struct ieee80211_key *
+ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_key *key;
+ struct sta_info *sta;
+
+ if (mac_addr) {
+ sta = sta_info_get_bss(sdata, mac_addr);
+ if (!sta)
+ return NULL;
+
+ if (pairwise && key_idx < NUM_DEFAULT_KEYS)
+ return rcu_dereference_check_key_mtx(local,
+ sta->ptk[key_idx]);
+
+ if (!pairwise &&
+ key_idx < NUM_DEFAULT_KEYS +
+ NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS)
+ return rcu_dereference_check_key_mtx(local,
+ sta->deflink.gtk[key_idx]);
+
+ return NULL;
+ }
+
+ if (pairwise && key_idx < NUM_DEFAULT_KEYS)
+ return rcu_dereference_check_key_mtx(local,
+ sdata->keys[key_idx]);
+
+ key = rcu_dereference_check_key_mtx(local, sdata->deflink.gtk[key_idx]);
+ if (key)
+ return key;
+
+ return NULL;
+}
+
static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- struct ieee80211_key *key = NULL;
+ struct ieee80211_key *key;
int ret;
mutex_lock(&local->sta_mtx);
mutex_lock(&local->key_mtx);
- if (mac_addr) {
- ret = -ENOENT;
-
- sta = sta_info_get_bss(sdata, mac_addr);
- if (!sta)
- goto out_unlock;
-
- if (pairwise)
- key = key_mtx_dereference(local, sta->ptk[key_idx]);
- else
- key = key_mtx_dereference(local,
- sta->deflink.gtk[key_idx]);
- } else
- key = key_mtx_dereference(local, sdata->keys[key_idx]);
-
+ key = ieee80211_lookup_key(sdata, key_idx, pairwise, mac_addr);
if (!key) {
ret = -ENOENT;
goto out_unlock;
@@ -597,10 +607,9 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
struct key_params *params))
{
struct ieee80211_sub_if_data *sdata;
- struct sta_info *sta = NULL;
u8 seq[6] = {0};
struct key_params params;
- struct ieee80211_key *key = NULL;
+ struct ieee80211_key *key;
u64 pn64;
u32 iv32;
u16 iv16;
@@ -611,20 +620,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
- if (mac_addr) {
- sta = sta_info_get_bss(sdata, mac_addr);
- if (!sta)
- goto out;
-
- if (pairwise && key_idx < NUM_DEFAULT_KEYS)
- key = rcu_dereference(sta->ptk[key_idx]);
- else if (!pairwise &&
- key_idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
- NUM_DEFAULT_BEACON_KEYS)
- key = rcu_dereference(sta->deflink.gtk[key_idx]);
- } else
- key = rcu_dereference(sdata->keys[key_idx]);
-
+ key = ieee80211_lookup_key(sdata, key_idx, pairwise, mac_addr);
if (!key)
goto out;
@@ -845,9 +841,10 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
if (sdata) {
- ieee80211_vif_release_channel(sdata);
- ret = ieee80211_vif_use_channel(sdata, chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
+ ieee80211_link_release_channel(sdata->link[0]);
+ ret = ieee80211_link_use_channel(sdata->link[0],
+ chandef,
+ IEEE80211_CHANCTX_EXCLUSIVE);
}
} else if (local->open_count == local->monitors) {
local->_oper_chandef = *chandef;
@@ -865,14 +862,15 @@ static int
ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
const u8 *resp, size_t resp_len,
const struct ieee80211_csa_settings *csa,
- const struct ieee80211_color_change_settings *cca)
+ const struct ieee80211_color_change_settings *cca,
+ struct ieee80211_link_data *link)
{
struct probe_resp *new, *old;
if (!resp || !resp_len)
return 1;
- old = sdata_dereference(sdata->u.ap.probe_resp, sdata);
+ old = sdata_dereference(link->u.ap.probe_resp, sdata);
new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
if (!new)
@@ -888,7 +886,7 @@ ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
else if (cca)
new->cntdwn_counter_offsets[0] = cca->counter_offset_presp;
- rcu_assign_pointer(sdata->u.ap.probe_resp, new);
+ rcu_assign_pointer(link->u.ap.probe_resp, new);
if (old)
kfree_rcu(old, rcu_head);
@@ -896,7 +894,9 @@ ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
}
static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_fils_discovery *params)
+ struct cfg80211_fils_discovery *params,
+ struct ieee80211_link_data *link,
+ struct ieee80211_bss_conf *link_conf)
{
struct fils_discovery_data *new, *old = NULL;
struct ieee80211_fils_discovery *fd;
@@ -904,17 +904,17 @@ static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
if (!params->tmpl || !params->tmpl_len)
return -EINVAL;
- fd = &sdata->vif.bss_conf.fils_discovery;
+ fd = &link_conf->fils_discovery;
fd->min_interval = params->min_interval;
fd->max_interval = params->max_interval;
- old = sdata_dereference(sdata->u.ap.fils_discovery, sdata);
+ old = sdata_dereference(link->u.ap.fils_discovery, sdata);
new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
if (!new)
return -ENOMEM;
new->len = params->tmpl_len;
memcpy(new->data, params->tmpl, params->tmpl_len);
- rcu_assign_pointer(sdata->u.ap.fils_discovery, new);
+ rcu_assign_pointer(link->u.ap.fils_discovery, new);
if (old)
kfree_rcu(old, rcu_head);
@@ -924,26 +924,27 @@ static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
static int
ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_unsol_bcast_probe_resp *params)
+ struct cfg80211_unsol_bcast_probe_resp *params,
+ struct ieee80211_link_data *link,
+ struct ieee80211_bss_conf *link_conf)
{
struct unsol_bcast_probe_resp_data *new, *old = NULL;
if (!params->tmpl || !params->tmpl_len)
return -EINVAL;
- old = sdata_dereference(sdata->u.ap.unsol_bcast_probe_resp, sdata);
+ old = sdata_dereference(link->u.ap.unsol_bcast_probe_resp, sdata);
new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
if (!new)
return -ENOMEM;
new->len = params->tmpl_len;
memcpy(new->data, params->tmpl, params->tmpl_len);
- rcu_assign_pointer(sdata->u.ap.unsol_bcast_probe_resp, new);
+ rcu_assign_pointer(link->u.ap.unsol_bcast_probe_resp, new);
if (old)
kfree_rcu(old, rcu_head);
- sdata->vif.bss_conf.unsol_bcast_probe_resp_interval =
- params->interval;
+ link_conf->unsol_bcast_probe_resp_interval = params->interval;
return 0;
}
@@ -951,18 +952,17 @@ ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
static int ieee80211_set_ftm_responder_params(
struct ieee80211_sub_if_data *sdata,
const u8 *lci, size_t lci_len,
- const u8 *civicloc, size_t civicloc_len)
+ const u8 *civicloc, size_t civicloc_len,
+ struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_ftm_responder_params *new, *old;
- struct ieee80211_bss_conf *bss_conf;
u8 *pos;
int len;
if (!lci_len && !civicloc_len)
return 0;
- bss_conf = &sdata->vif.bss_conf;
- old = bss_conf->ftmr_params;
+ old = link_conf->ftmr_params;
len = lci_len + civicloc_len;
new = kzalloc(sizeof(*new) + len, GFP_KERNEL);
@@ -984,7 +984,7 @@ static int ieee80211_set_ftm_responder_params(
pos += civicloc_len;
}
- bss_conf->ftmr_params = new;
+ link_conf->ftmr_params = new;
kfree(old);
return 0;
@@ -1017,9 +1017,11 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
int new_head_len, new_tail_len;
int size, err;
u32 changed = BSS_CHANGED_BEACON;
+ struct ieee80211_link_data *link = sdata->link[params->link_id];
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[params->link_id];
- old = sdata_dereference(sdata->u.ap.beacon, sdata);
-
+ old = sdata_dereference(link->u.ap.beacon, sdata);
/* Need to have a beacon head if we don't have one yet */
if (!params->head && !old)
@@ -1073,7 +1075,7 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
pos += struct_size(new->mbssid_ies, elem, mbssid->cnt);
ieee80211_copy_mbssid_beacon(pos, new->mbssid_ies, mbssid);
/* update bssid_indicator */
- sdata->vif.bss_conf.bssid_indicator =
+ link_conf->bssid_indicator =
ilog2(__roundup_pow_of_two(mbssid->cnt + 1));
}
@@ -1101,7 +1103,7 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
memcpy(new->tail, old->tail, new_tail_len);
err = ieee80211_set_probe_resp(sdata, params->probe_resp,
- params->probe_resp_len, csa, cca);
+ params->probe_resp_len, csa, cca, link);
if (err < 0) {
kfree(new);
return err;
@@ -1110,12 +1112,13 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
changed |= BSS_CHANGED_AP_PROBE_RESP;
if (params->ftm_responder != -1) {
- sdata->vif.bss_conf.ftm_responder = params->ftm_responder;
+ link_conf->ftm_responder = params->ftm_responder;
err = ieee80211_set_ftm_responder_params(sdata,
params->lci,
params->lci_len,
params->civicloc,
- params->civicloc_len);
+ params->civicloc_len,
+ link_conf);
if (err < 0) {
kfree(new);
@@ -1125,7 +1128,8 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
changed |= BSS_CHANGED_FTM_RESPONDER;
}
- rcu_assign_pointer(sdata->u.ap.beacon, new);
+ rcu_assign_pointer(link->u.ap.beacon, new);
+ sdata->u.ap.active = true;
if (old)
kfree_rcu(old, rcu_head);
@@ -1143,33 +1147,35 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
u32 changed = BSS_CHANGED_BEACON_INT |
BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON |
- BSS_CHANGED_SSID |
BSS_CHANGED_P2P_PS |
BSS_CHANGED_TXPOWER |
BSS_CHANGED_TWT;
int i, err;
int prev_beacon_int;
+ unsigned int link_id = params->beacon.link_id;
+ struct ieee80211_link_data *link = sdata->link[link_id];
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
- old = sdata_dereference(sdata->u.ap.beacon, sdata);
+ old = sdata_dereference(link->u.ap.beacon, sdata);
if (old)
return -EALREADY;
if (params->smps_mode != NL80211_SMPS_OFF)
return -ENOTSUPP;
- sdata->smps_mode = IEEE80211_SMPS_OFF;
+ link->smps_mode = IEEE80211_SMPS_OFF;
- sdata->needed_rx_chains = sdata->local->rx_chains;
+ link->needed_rx_chains = sdata->local->rx_chains;
- prev_beacon_int = sdata->vif.bss_conf.beacon_int;
- sdata->vif.bss_conf.beacon_int = params->beacon_interval;
+ prev_beacon_int = link_conf->beacon_int;
+ link_conf->beacon_int = params->beacon_interval;
if (params->he_cap && params->he_oper) {
- sdata->vif.bss_conf.he_support = true;
- sdata->vif.bss_conf.htc_trig_based_pkt_ext =
+ link_conf->he_support = true;
+ link_conf->htc_trig_based_pkt_ext =
le32_get_bits(params->he_oper->he_oper_params,
IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK);
- sdata->vif.bss_conf.frame_time_rts_th =
+ link_conf->frame_time_rts_th =
le32_get_bits(params->he_oper->he_oper_params,
IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
changed |= BSS_CHANGED_HE_OBSS_PD;
@@ -1181,19 +1187,20 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (sdata->vif.type == NL80211_IFTYPE_AP &&
params->mbssid_config.tx_wdev) {
err = ieee80211_set_ap_mbssid_options(sdata,
- params->mbssid_config);
+ params->mbssid_config,
+ link_conf);
if (err)
return err;
}
mutex_lock(&local->mtx);
- err = ieee80211_vif_use_channel(sdata, &params->chandef,
- IEEE80211_CHANCTX_SHARED);
+ err = ieee80211_link_use_channel(link, &params->chandef,
+ IEEE80211_CHANCTX_SHARED);
if (!err)
- ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
+ ieee80211_link_copy_chanctx_to_vlans(link, false);
mutex_unlock(&local->mtx);
if (err) {
- sdata->vif.bss_conf.beacon_int = prev_beacon_int;
+ link_conf->beacon_int = prev_beacon_int;
return err;
}
@@ -1207,9 +1214,6 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
params->crypto.control_port_over_nl80211;
sdata->control_port_no_preauth =
params->crypto.control_port_no_preauth;
- sdata->encrypt_headroom = ieee80211_cs_headroom(sdata->local,
- &params->crypto,
- sdata->vif.type);
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
vlan->control_port_protocol =
@@ -1220,34 +1224,30 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
params->crypto.control_port_over_nl80211;
vlan->control_port_no_preauth =
params->crypto.control_port_no_preauth;
- vlan->encrypt_headroom =
- ieee80211_cs_headroom(sdata->local,
- &params->crypto,
- vlan->vif.type);
- }
-
- sdata->vif.bss_conf.dtim_period = params->dtim_period;
- sdata->vif.bss_conf.enable_beacon = true;
- sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p;
- sdata->vif.bss_conf.twt_responder = params->twt_responder;
- sdata->vif.bss_conf.he_obss_pd = params->he_obss_pd;
- sdata->vif.bss_conf.he_bss_color = params->beacon.he_bss_color;
- sdata->vif.bss_conf.s1g = params->chandef.chan->band ==
+ }
+
+ link_conf->dtim_period = params->dtim_period;
+ link_conf->enable_beacon = true;
+ link_conf->allow_p2p_go_ps = sdata->vif.p2p;
+ link_conf->twt_responder = params->twt_responder;
+ link_conf->he_obss_pd = params->he_obss_pd;
+ link_conf->he_bss_color = params->beacon.he_bss_color;
+ sdata->vif.cfg.s1g = params->chandef.chan->band ==
NL80211_BAND_S1GHZ;
- sdata->vif.bss_conf.ssid_len = params->ssid_len;
+ sdata->vif.cfg.ssid_len = params->ssid_len;
if (params->ssid_len)
- memcpy(sdata->vif.bss_conf.ssid, params->ssid,
+ memcpy(sdata->vif.cfg.ssid, params->ssid,
params->ssid_len);
- sdata->vif.bss_conf.hidden_ssid =
+ link_conf->hidden_ssid =
(params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
- memset(&sdata->vif.bss_conf.p2p_noa_attr, 0,
- sizeof(sdata->vif.bss_conf.p2p_noa_attr));
- sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow =
+ memset(&link_conf->p2p_noa_attr, 0,
+ sizeof(link_conf->p2p_noa_attr));
+ link_conf->p2p_noa_attr.oppps_ctwindow =
params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK;
if (params->p2p_opp_ps)
- sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow |=
+ link_conf->p2p_noa_attr.oppps_ctwindow |=
IEEE80211_P2P_OPPPS_ENABLE_BIT;
sdata->beacon_rate_set = false;
@@ -1262,7 +1262,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
- sdata->vif.bss_conf.beacon_tx_rate = params->beacon_rate;
+ link_conf->beacon_tx_rate = params->beacon_rate;
err = ieee80211_assign_beacon(sdata, &params->beacon, NULL, NULL);
if (err < 0)
@@ -1271,7 +1271,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (params->fils_discovery.max_interval) {
err = ieee80211_set_fils_discovery(sdata,
- &params->fils_discovery);
+ &params->fils_discovery,
+ link, link_conf);
if (err < 0)
goto error;
changed |= BSS_CHANGED_FILS_DISCOVERY;
@@ -1279,24 +1280,27 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (params->unsol_bcast_probe_resp.interval) {
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
- &params->unsol_bcast_probe_resp);
+ &params->unsol_bcast_probe_resp,
+ link, link_conf);
if (err < 0)
goto error;
changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
}
- err = drv_start_ap(sdata->local, sdata);
+ err = drv_start_ap(sdata->local, sdata, link_id);
if (err) {
- old = sdata_dereference(sdata->u.ap.beacon, sdata);
+ old = sdata_dereference(link->u.ap.beacon, sdata);
if (old)
kfree_rcu(old, rcu_head);
- RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(link->u.ap.beacon, NULL);
+ sdata->u.ap.active = false;
goto error;
}
ieee80211_recalc_dtim(local, sdata);
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID);
+ ieee80211_link_info_change_notify(sdata, link_id, changed);
netif_carrier_on(dev);
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
@@ -1306,7 +1310,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
error:
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(link);
mutex_unlock(&local->mtx);
return err;
@@ -1315,21 +1319,22 @@ error:
static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_beacon_data *params)
{
- struct ieee80211_sub_if_data *sdata;
- struct ieee80211_bss_conf *bss_conf;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct beacon_data *old;
int err;
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[params->link_id];
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
sdata_assert_lock(sdata);
/* don't allow changing the beacon while a countdown is in place - offset
* of channel switch counter may change
*/
- if (sdata->vif.csa_active || sdata->vif.color_change_active)
+ if (link_conf->csa_active || link_conf->color_change_active)
return -EBUSY;
- old = sdata_dereference(sdata->u.ap.beacon, sdata);
+ old = sdata_dereference(sdata->link[params->link_id]->u.ap.beacon,
+ sdata);
if (!old)
return -ENOENT;
@@ -1337,28 +1342,28 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
if (err < 0)
return err;
- bss_conf = &sdata->vif.bss_conf;
if (params->he_bss_color_valid &&
- params->he_bss_color.enabled != bss_conf->he_bss_color.enabled) {
- bss_conf->he_bss_color.enabled = params->he_bss_color.enabled;
+ params->he_bss_color.enabled != link_conf->he_bss_color.enabled) {
+ link_conf->he_bss_color.enabled = params->he_bss_color.enabled;
err |= BSS_CHANGED_HE_BSS_COLOR;
}
- ieee80211_bss_info_change_notify(sdata, err);
+ ieee80211_link_info_change_notify(sdata, params->link_id, err);
return 0;
}
-static void ieee80211_free_next_beacon(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_free_next_beacon(struct ieee80211_link_data *link)
{
- if (!sdata->u.ap.next_beacon)
+ if (!link->u.ap.next_beacon)
return;
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
+ kfree(link->u.ap.next_beacon->mbssid_ies);
+ kfree(link->u.ap.next_beacon);
+ link->u.ap.next_beacon = NULL;
}
-static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_sub_if_data *vlan;
@@ -1368,31 +1373,34 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
struct fils_discovery_data *old_fils_discovery;
struct unsol_bcast_probe_resp_data *old_unsol_bcast_probe_resp;
struct cfg80211_chan_def chandef;
+ struct ieee80211_link_data *link = sdata->link[link_id];
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
sdata_assert_lock(sdata);
- old_beacon = sdata_dereference(sdata->u.ap.beacon, sdata);
+ old_beacon = sdata_dereference(link->u.ap.beacon, sdata);
if (!old_beacon)
return -ENOENT;
- old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata);
- old_fils_discovery = sdata_dereference(sdata->u.ap.fils_discovery,
+ old_probe_resp = sdata_dereference(link->u.ap.probe_resp,
+ sdata);
+ old_fils_discovery = sdata_dereference(link->u.ap.fils_discovery,
sdata);
old_unsol_bcast_probe_resp =
- sdata_dereference(sdata->u.ap.unsol_bcast_probe_resp,
+ sdata_dereference(link->u.ap.unsol_bcast_probe_resp,
sdata);
/* abort any running channel switch */
mutex_lock(&local->mtx);
- sdata->vif.csa_active = false;
- if (sdata->csa_block_tx) {
+ link_conf->csa_active = false;
+ if (link->csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
+ link->csa_block_tx = false;
}
mutex_unlock(&local->mtx);
- ieee80211_free_next_beacon(sdata);
+ ieee80211_free_next_beacon(link);
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
@@ -1400,10 +1408,11 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
netif_carrier_off(dev);
/* remove beacon and probe response */
- RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
- RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
- RCU_INIT_POINTER(sdata->u.ap.fils_discovery, NULL);
- RCU_INIT_POINTER(sdata->u.ap.unsol_bcast_probe_resp, NULL);
+ sdata->u.ap.active = false;
+ RCU_INIT_POINTER(link->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(link->u.ap.probe_resp, NULL);
+ RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
+ RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL);
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
@@ -1412,35 +1421,36 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
if (old_unsol_bcast_probe_resp)
kfree_rcu(old_unsol_bcast_probe_resp, rcu_head);
- kfree(sdata->vif.bss_conf.ftmr_params);
- sdata->vif.bss_conf.ftmr_params = NULL;
+ kfree(link_conf->ftmr_params);
+ link_conf->ftmr_params = NULL;
__sta_info_flush(sdata, true);
ieee80211_free_keys(sdata, true);
- sdata->vif.bss_conf.enable_beacon = false;
+ link_conf->enable_beacon = false;
sdata->beacon_rate_set = false;
- sdata->vif.bss_conf.ssid_len = 0;
+ sdata->vif.cfg.ssid_len = 0;
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+ ieee80211_link_info_change_notify(sdata, link_id,
+ BSS_CHANGED_BEACON_ENABLED);
if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
- cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+ chandef = link_conf->chandef;
+ cancel_delayed_work_sync(&link->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
GFP_KERNEL);
}
- drv_stop_ap(sdata->local, sdata);
+ drv_stop_ap(sdata->local, sdata, link_id);
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
mutex_lock(&local->mtx);
- ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_copy_chanctx_to_vlans(link, true);
+ ieee80211_link_release_channel(link);
mutex_unlock(&local->mtx);
return 0;
@@ -1571,38 +1581,6 @@ static void sta_apply_mesh_params(struct ieee80211_local *local,
#endif
}
-static void sta_apply_airtime_params(struct ieee80211_local *local,
- struct sta_info *sta,
- struct station_parameters *params)
-{
- u8 ac;
-
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct airtime_sched_info *air_sched = &local->airtime[ac];
- struct airtime_info *air_info = &sta->airtime[ac];
- struct txq_info *txqi;
- u8 tid;
-
- spin_lock_bh(&air_sched->lock);
- for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
- if (air_info->weight == params->airtime_weight ||
- !sta->sta.txq[tid] ||
- ac != ieee80211_ac_from_tid(tid))
- continue;
-
- airtime_weight_set(air_info, params->airtime_weight);
-
- txqi = to_txq_info(sta->sta.txq[tid]);
- if (RB_EMPTY_NODE(&txqi->schedule_order))
- continue;
-
- ieee80211_update_airtime_weight(local, air_sched,
- 0, true);
- }
- spin_unlock_bh(&air_sched->lock);
- }
-}
-
static int sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
@@ -1761,19 +1739,21 @@ static int sta_apply_parameters(struct ieee80211_local *local,
if (params->ht_capa)
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
- params->ht_capa, sta);
+ params->ht_capa,
+ &sta->deflink);
/* VHT can override some HT caps such as the A-MSDU max length */
if (params->vht_capa)
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- params->vht_capa, sta);
+ params->vht_capa,
+ &sta->deflink);
if (params->he_capa)
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
(void *)params->he_capa,
params->he_capa_len,
(void *)params->he_6ghz_capa,
- sta);
+ &sta->deflink);
if (params->eht_capa)
ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
@@ -1781,13 +1761,14 @@ static int sta_apply_parameters(struct ieee80211_local *local,
params->he_capa_len,
params->eht_capa,
params->eht_capa_len,
- sta);
+ &sta->deflink);
if (params->opmode_notif_used) {
/* returned value is only needed for rc update, but the
* rc isn't initialized here yet, so ignore it
*/
- __ieee80211_vht_handle_opmode(sdata, sta, params->opmode_notif,
+ __ieee80211_vht_handle_opmode(sdata, &sta->deflink,
+ params->opmode_notif,
sband->band);
}
@@ -1798,8 +1779,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
sta_apply_mesh_params(local, sta, params);
if (params->airtime_weight)
- sta_apply_airtime_params(local, sta, params);
-
+ sta->airtime_weight = params->airtime_weight;
/* set the STA state after all sta info from usermode has been set */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
@@ -1841,7 +1821,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
!sdata->u.mgd.associated)
return -EINVAL;
- sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
+ sta = sta_info_alloc(sdata, mac, -1, GFP_KERNEL);
if (!sta)
return -ENOMEM;
@@ -2355,7 +2335,7 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) {
conf->ht_opmode = nconf->ht_opmode;
sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_HT);
}
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask))
conf->dot11MeshHWMPactivePathToRootTimeout =
@@ -2403,12 +2383,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
sdata->control_port_over_nl80211 = setup->control_port_over_nl80211;
/* can mesh use other SMPS modes? */
- sdata->smps_mode = IEEE80211_SMPS_OFF;
- sdata->needed_rx_chains = sdata->local->rx_chains;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
+ sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
mutex_lock(&sdata->local->mtx);
- err = ieee80211_vif_use_channel(sdata, &setup->chandef,
- IEEE80211_CHANCTX_SHARED);
+ err = ieee80211_link_use_channel(sdata->link[0], &setup->chandef,
+ IEEE80211_CHANCTX_SHARED);
mutex_unlock(&sdata->local->mtx);
if (err)
return err;
@@ -2422,7 +2402,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
ieee80211_stop_mesh(sdata);
mutex_lock(&sdata->local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
kfree(sdata->u.mesh.ie);
mutex_unlock(&sdata->local->mtx);
@@ -2438,7 +2418,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
u32 changed = 0;
- if (!sdata_dereference(sdata->u.ap.beacon, sdata))
+ if (!sdata_dereference(sdata->deflink.u.ap.beacon, sdata))
return -ENOENT;
sband = ieee80211_get_sband(sdata);
@@ -2510,7 +2490,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
changed |= BSS_CHANGED_P2P_PS;
}
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
return 0;
}
@@ -2551,7 +2531,7 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
return -EINVAL;
}
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_QOS);
return 0;
}
@@ -2603,7 +2583,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
* the frames sent while scanning on other channel will be
* lost)
*/
- if (sdata->u.ap.beacon &&
+ if (sdata->deflink.u.ap.beacon &&
(!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
!(req->flags & NL80211_SCAN_FLAG_AP)))
return -EOPNOTSUPP;
@@ -2700,7 +2680,7 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
memcpy(sdata->vif.bss_conf.mcast_rate, rate,
sizeof(int) * NUM_NL80211_BANDS);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MCAST_RATE);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_MCAST_RATE);
return 0;
}
@@ -2784,14 +2764,15 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
- sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+ sdata->deflink.user_power_level =
+ IEEE80211_UNSET_POWER_LEVEL;
txp_type = NL80211_TX_POWER_LIMITED;
break;
case NL80211_TX_POWER_LIMITED:
case NL80211_TX_POWER_FIXED:
if (mbm < 0 || (mbm % 100))
return -EOPNOTSUPP;
- sdata->user_power_level = MBM_TO_DBM(mbm);
+ sdata->deflink.user_power_level = MBM_TO_DBM(mbm);
break;
}
@@ -2824,7 +2805,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
has_monitor = true;
continue;
}
- sdata->user_power_level = local->user_power_level;
+ sdata->deflink.user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
sdata->vif.bss_conf.txpower_type = txp_type;
@@ -2840,7 +2821,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
if (sdata) {
- sdata->user_power_level = local->user_power_level;
+ sdata->deflink.user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
sdata->vif.bss_conf.txpower_type = txp_type;
@@ -2914,6 +2895,7 @@ static int ieee80211_testmode_dump(struct wiphy *wiphy,
#endif
int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
enum ieee80211_smps_mode smps_mode)
{
const u8 *ap;
@@ -2927,8 +2909,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
return -EINVAL;
- old_req = sdata->u.mgd.req_smps;
- sdata->u.mgd.req_smps = smps_mode;
+ old_req = sdata->link[link_id]->u.mgd.req_smps;
+ sdata->link[link_id]->u.mgd.req_smps = smps_mode;
if (old_req == smps_mode &&
smps_mode != IEEE80211_SMPS_AUTOMATIC)
@@ -2940,10 +2922,10 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
* the new value until we associate.
*/
if (!sdata->u.mgd.associated ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ sdata->vif.link_conf[link_id]->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
return 0;
- ap = sdata->u.mgd.bssid;
+ ap = sdata->link[link_id]->u.mgd.bssid;
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
@@ -2967,7 +2949,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
err = ieee80211_send_smps_action(sdata, smps_mode,
ap, ap);
if (err)
- sdata->u.mgd.req_smps = old_req;
+ sdata->link[link_id]->u.mgd.req_smps = old_req;
else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
ieee80211_teardown_tdls_peers(sdata);
@@ -2979,6 +2961,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ unsigned int link_id;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
@@ -2995,7 +2978,12 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
/* no change, but if automatic follow powersave */
sdata_lock(sdata);
- __ieee80211_request_smps_mgd(sdata, sdata->u.mgd.req_smps);
+ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ if (!sdata->link[link_id])
+ continue;
+ __ieee80211_request_smps_mgd(sdata, link_id,
+ sdata->link[link_id]->u.mgd.req_smps);
+ }
sdata_unlock(sdata);
if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
@@ -3028,12 +3016,12 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
bss_conf->cqm_rssi_hyst = rssi_hyst;
bss_conf->cqm_rssi_low = 0;
bss_conf->cqm_rssi_high = 0;
- sdata->u.mgd.last_cqm_event_signal = 0;
+ sdata->deflink.u.mgd.last_cqm_event_signal = 0;
/* tell the driver upon association, unless already associated */
if (sdata->u.mgd.associated &&
sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_CQM);
return 0;
}
@@ -3053,18 +3041,19 @@ static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
bss_conf->cqm_rssi_high = rssi_high;
bss_conf->cqm_rssi_thold = 0;
bss_conf->cqm_rssi_hyst = 0;
- sdata->u.mgd.last_cqm_event_signal = 0;
+ sdata->deflink.u.mgd.last_cqm_event_signal = 0;
/* tell the driver upon association, unless already associated */
if (sdata->u.mgd.associated &&
sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_CQM);
return 0;
}
static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
struct net_device *dev,
+ unsigned int link_id,
const u8 *addr,
const struct cfg80211_bitrate_mask *mask)
{
@@ -3081,7 +3070,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
* to send something, and if we're an AP we have to be able to do
* so at a basic rate so that all clients can receive it.
*/
- if (rcu_access_pointer(sdata->vif.chanctx_conf) &&
+ if (rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) &&
sdata->vif.bss_conf.chandef.chan) {
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band;
@@ -3146,16 +3135,16 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
}
/* whatever, but channel contexts should not complain about that one */
- sdata->smps_mode = IEEE80211_SMPS_OFF;
- sdata->needed_rx_chains = local->rx_chains;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
+ sdata->deflink.needed_rx_chains = local->rx_chains;
- err = ieee80211_vif_use_channel(sdata, chandef,
- IEEE80211_CHANCTX_SHARED);
+ err = ieee80211_link_use_channel(sdata->link[0], chandef,
+ IEEE80211_CHANCTX_SHARED);
if (err)
goto out_unlock;
ieee80211_queue_delayed_work(&sdata->local->hw,
- &sdata->dfs_cac_timer_work,
+ &sdata->deflink.dfs_cac_timer_work,
msecs_to_jiffies(cac_time_ms));
out_unlock:
@@ -3175,10 +3164,10 @@ static void ieee80211_end_cac(struct wiphy *wiphy,
* by the time it gets it, sdata->wdev.cac_started
* will no longer be true
*/
- cancel_delayed_work(&sdata->dfs_cac_timer_work);
+ cancel_delayed_work(&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
sdata->wdev.cac_started = false;
}
}
@@ -3294,10 +3283,10 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
continue;
ieee80211_queue_work(&iter->local->hw,
- &iter->csa_finalize_work);
+ &iter->deflink.csa_finalize_work);
}
}
- ieee80211_queue_work(&local->hw, &sdata->csa_finalize_work);
+ ieee80211_queue_work(&local->hw, &sdata->deflink.csa_finalize_work);
rcu_read_unlock();
}
@@ -3309,7 +3298,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- sdata->csa_block_tx = block_tx;
+ sdata->deflink.csa_block_tx = block_tx;
sdata_info(sdata, "channel switch failed, disconnecting\n");
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
}
@@ -3322,12 +3311,13 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- if (!sdata->u.ap.next_beacon)
+ if (!sdata->deflink.u.ap.next_beacon)
return -EINVAL;
- err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
+ err = ieee80211_assign_beacon(sdata,
+ sdata->deflink.u.ap.next_beacon,
NULL, NULL);
- ieee80211_free_next_beacon(sdata);
+ ieee80211_free_next_beacon(&sdata->deflink);
if (err < 0)
return err;
@@ -3372,41 +3362,41 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
* completed successfully
*/
- if (sdata->reserved_chanctx) {
+ if (sdata->deflink.reserved_chanctx) {
/*
* with multi-vif csa driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
* reservations
*/
- if (sdata->reserved_ready)
+ if (sdata->deflink.reserved_ready)
return 0;
- return ieee80211_vif_use_reserved_context(sdata);
+ return ieee80211_link_use_reserved_context(sdata->link[0]);
}
if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef,
- &sdata->csa_chandef))
+ &sdata->deflink.csa_chandef))
return -EINVAL;
- sdata->vif.csa_active = false;
+ sdata->vif.bss_conf.csa_active = false;
err = ieee80211_set_after_csa_beacon(sdata, &changed);
if (err)
return err;
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
- if (sdata->csa_block_tx) {
+ if (sdata->deflink.csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
+ sdata->deflink.csa_block_tx = false;
}
err = drv_post_channel_switch(sdata);
if (err)
return err;
- cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
+ cfg80211_ch_switch_notify(sdata->dev, &sdata->deflink.csa_chandef, 0);
return 0;
}
@@ -3424,7 +3414,7 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
- csa_finalize_work);
+ deflink.csa_finalize_work);
struct ieee80211_local *local = sdata->local;
sdata_lock(sdata);
@@ -3432,7 +3422,7 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
mutex_lock(&local->chanctx_mtx);
/* AP might have been stopped while waiting for the lock. */
- if (!sdata->vif.csa_active)
+ if (!sdata->vif.bss_conf.csa_active)
goto unlock;
if (!ieee80211_sdata_running(sdata))
@@ -3455,9 +3445,9 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->u.ap.next_beacon =
+ sdata->deflink.u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_after);
- if (!sdata->u.ap.next_beacon)
+ if (!sdata->deflink.u.ap.next_beacon)
return -ENOMEM;
/*
@@ -3483,7 +3473,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
(params->n_counter_offsets_presp >
IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
- ieee80211_free_next_beacon(sdata);
+ ieee80211_free_next_beacon(&sdata->deflink);
return -EINVAL;
}
@@ -3495,14 +3485,14 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
err = ieee80211_assign_beacon(sdata, &params->beacon_csa, &csa, NULL);
if (err < 0) {
- ieee80211_free_next_beacon(sdata);
+ ieee80211_free_next_beacon(&sdata->deflink);
return err;
}
*changed |= err;
break;
case NL80211_IFTYPE_ADHOC:
- if (!sdata->vif.bss_conf.ibss_joined)
+ if (!sdata->vif.cfg.ibss_joined)
return -EINVAL;
if (params->chandef.width != sdata->u.ibss.chandef.width)
@@ -3584,9 +3574,9 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
static void ieee80211_color_change_abort(struct ieee80211_sub_if_data *sdata)
{
- sdata->vif.color_change_active = false;
+ sdata->vif.bss_conf.color_change_active = false;
- ieee80211_free_next_beacon(sdata);
+ ieee80211_free_next_beacon(&sdata->deflink);
cfg80211_color_change_aborted_notify(sdata->dev);
}
@@ -3617,11 +3607,11 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
/* don't allow another channel switch if one is already active. */
- if (sdata->vif.csa_active)
+ if (sdata->vif.bss_conf.csa_active)
return -EBUSY;
mutex_lock(&local->chanctx_mtx);
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (!conf) {
err = -EBUSY;
@@ -3646,42 +3636,43 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (err)
goto out;
- err = ieee80211_vif_reserve_chanctx(sdata, &params->chandef,
- chanctx->mode,
- params->radar_required);
+ err = ieee80211_link_reserve_chanctx(sdata->link[0], &params->chandef,
+ chanctx->mode,
+ params->radar_required);
if (err)
goto out;
/* if reservation is invalid then this will fail */
err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0);
if (err) {
- ieee80211_vif_unreserve_chanctx(sdata);
+ ieee80211_link_unreserve_chanctx(sdata->link[0]);
goto out;
}
/* if there is a color change in progress, abort it */
- if (sdata->vif.color_change_active)
+ if (sdata->vif.bss_conf.color_change_active)
ieee80211_color_change_abort(sdata);
err = ieee80211_set_csa_beacon(sdata, params, &changed);
if (err) {
- ieee80211_vif_unreserve_chanctx(sdata);
+ ieee80211_link_unreserve_chanctx(sdata->link[0]);
goto out;
}
- sdata->csa_chandef = params->chandef;
- sdata->csa_block_tx = params->block_tx;
- sdata->vif.csa_active = true;
+ sdata->deflink.csa_chandef = params->chandef;
+ sdata->deflink.csa_block_tx = params->block_tx;
+ sdata->vif.bss_conf.csa_active = true;
- if (sdata->csa_block_tx)
+ if (sdata->deflink.csa_block_tx)
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- cfg80211_ch_switch_started_notify(sdata->dev, &sdata->csa_chandef,
+ cfg80211_ch_switch_started_notify(sdata->dev,
+ &sdata->deflink.csa_chandef,
params->count, params->block_tx);
if (changed) {
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
drv_channel_switch_beacon(sdata, &params->chandef);
} else {
/* if the beacon didn't change, we can finalize immediately */
@@ -3840,7 +3831,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
mutex_lock(&local->mtx);
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
ret = -EINVAL;
goto unlock;
@@ -3914,6 +3905,7 @@ unlock:
static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -3922,9 +3914,9 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
int ret = -ENODATA;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.link_conf[link_id]->chanctx_conf);
if (chanctx_conf) {
- *chandef = sdata->vif.bss_conf.chandef;
+ *chandef = sdata->vif.link_conf[link_id]->chandef;
ret = 0;
} else if (local->open_count > 0 &&
local->open_count == local->monitors &&
@@ -3974,15 +3966,17 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
struct net_device *dev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int ret;
u32 changed = 0;
- ret = ieee80211_vif_change_bandwidth(sdata, chandef, &changed);
+ ret = ieee80211_link_change_bandwidth(sdata->link[link_id], chandef,
+ &changed);
if (ret == 0)
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, link_id, changed);
return ret;
}
@@ -4322,12 +4316,13 @@ ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP: {
int ret;
- if (!sdata->u.ap.next_beacon)
+ if (!sdata->deflink.u.ap.next_beacon)
return -EINVAL;
- ret = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
+ ret = ieee80211_assign_beacon(sdata,
+ sdata->deflink.u.ap.next_beacon,
NULL, NULL);
- ieee80211_free_next_beacon(sdata);
+ ieee80211_free_next_beacon(&sdata->deflink);
if (ret < 0)
return ret;
@@ -4353,9 +4348,9 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->u.ap.next_beacon =
+ sdata->deflink.u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_next);
- if (!sdata->u.ap.next_beacon)
+ if (!sdata->deflink.u.ap.next_beacon)
return -ENOMEM;
if (params->count <= 1)
@@ -4370,7 +4365,7 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
err = ieee80211_assign_beacon(sdata, &params->beacon_color_change,
NULL, &color_change);
if (err < 0) {
- ieee80211_free_next_beacon(sdata);
+ ieee80211_free_next_beacon(&sdata->deflink);
return err;
}
*changed |= err;
@@ -4390,7 +4385,7 @@ ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.he_bss_color.enabled = enable;
changed |= BSS_CHANGED_HE_BSS_COLOR;
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) {
struct ieee80211_sub_if_data *child;
@@ -4400,8 +4395,8 @@ ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata,
if (child != sdata && child->vif.mbssid_tx_vif == &sdata->vif) {
child->vif.bss_conf.he_bss_color.color = color;
child->vif.bss_conf.he_bss_color.enabled = enable;
- ieee80211_bss_info_change_notify(child,
- BSS_CHANGED_HE_BSS_COLOR);
+ ieee80211_link_info_change_notify(child, 0,
+ BSS_CHANGED_HE_BSS_COLOR);
}
}
mutex_unlock(&sdata->local->iflist_mtx);
@@ -4417,7 +4412,7 @@ static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata)
sdata_assert_lock(sdata);
lockdep_assert_held(&local->mtx);
- sdata->vif.color_change_active = false;
+ sdata->vif.bss_conf.color_change_active = false;
err = ieee80211_set_after_color_change_beacon(sdata, &changed);
if (err) {
@@ -4426,7 +4421,7 @@ static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata)
}
ieee80211_color_change_bss_config_notify(sdata,
- sdata->vif.color_change_color,
+ sdata->vif.bss_conf.color_change_color,
1, changed);
cfg80211_color_change_notify(sdata->dev);
@@ -4437,14 +4432,14 @@ void ieee80211_color_change_finalize_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
- color_change_finalize_work);
+ deflink.color_change_finalize_work);
struct ieee80211_local *local = sdata->local;
sdata_lock(sdata);
mutex_lock(&local->mtx);
/* AP might have been stopped while waiting for the lock. */
- if (!sdata->vif.color_change_active)
+ if (!sdata->vif.bss_conf.color_change_active)
goto unlock;
if (!ieee80211_sdata_running(sdata))
@@ -4462,7 +4457,7 @@ void ieee80211_color_change_finish(struct ieee80211_vif *vif)
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
ieee80211_queue_work(&sdata->local->hw,
- &sdata->color_change_finalize_work);
+ &sdata->deflink.color_change_finalize_work);
}
EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
@@ -4472,7 +4467,7 @@ ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- if (sdata->vif.color_change_active || sdata->vif.csa_active)
+ if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active)
return;
cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap, gfp);
@@ -4498,7 +4493,7 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev,
/* don't allow another color change if one is already active or if csa
* is active
*/
- if (sdata->vif.color_change_active || sdata->vif.csa_active) {
+ if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active) {
err = -EBUSY;
goto out;
}
@@ -4507,8 +4502,8 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev,
if (err)
goto out;
- sdata->vif.color_change_active = true;
- sdata->vif.color_change_color = params->color;
+ sdata->vif.bss_conf.color_change_active = true;
+ sdata->vif.bss_conf.color_change_color = params->color;
cfg80211_color_change_started_notify(sdata->dev, params->count);
@@ -4536,6 +4531,24 @@ ieee80211_set_radar_background(struct wiphy *wiphy,
return local->ops->set_radar_background(&local->hw, chandef);
}
+static int ieee80211_add_intf_link(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+ return ieee80211_vif_set_links(sdata, wdev->valid_links);
+}
+
+static void ieee80211_del_intf_link(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+ ieee80211_vif_set_links(sdata, wdev->valid_links);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -4641,4 +4654,6 @@ const struct cfg80211_ops mac80211_config_ops = {
.set_sar_specs = ieee80211_set_sar_specs,
.color_change = ieee80211_color_change,
.set_radar_background = ieee80211_set_radar_background,
+ .add_intf_link = ieee80211_add_intf_link,
+ .del_intf_link = ieee80211_del_intf_link,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index d8246e00a10b..6853b563fb6c 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 - channel management
- * Copyright 2020 - 2021 Intel Corporation
+ * Copyright 2020 - 2022 Intel Corporation
*/
#include <linux/nl80211.h>
@@ -15,12 +15,12 @@
static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
int num = 0;
lockdep_assert_held(&local->chanctx_mtx);
- list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
+ list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list)
num++;
return num;
@@ -29,12 +29,12 @@ static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
int num = 0;
lockdep_assert_held(&local->chanctx_mtx);
- list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
+ list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)
num++;
return num;
@@ -67,12 +67,14 @@ static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
}
static struct ieee80211_chanctx *
-ieee80211_vif_get_chanctx(struct ieee80211_sub_if_data *sdata)
+ieee80211_vif_get_chanctx(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id)
{
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
struct ieee80211_local *local __maybe_unused = sdata->local;
struct ieee80211_chanctx_conf *conf;
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(link_conf->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (!conf)
return NULL;
@@ -80,21 +82,27 @@ ieee80211_vif_get_chanctx(struct ieee80211_sub_if_data *sdata)
return container_of(conf, struct ieee80211_chanctx, conf);
}
+static struct ieee80211_chanctx *
+ieee80211_link_get_chanctx(struct ieee80211_link_data *link)
+{
+ return ieee80211_vif_get_chanctx(link->sdata, link->link_id);
+}
+
static const struct cfg80211_chan_def *
ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
const struct cfg80211_chan_def *compat)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
lockdep_assert_held(&local->chanctx_mtx);
- list_for_each_entry(sdata, &ctx->reserved_vifs,
+ list_for_each_entry(link, &ctx->reserved_links,
reserved_chanctx_list) {
if (!compat)
- compat = &sdata->reserved_chandef;
+ compat = &link->reserved_chandef;
- compat = cfg80211_chandef_compatible(&sdata->reserved_chandef,
+ compat = cfg80211_chandef_compatible(&link->reserved_chandef,
compat);
if (!compat)
break;
@@ -108,20 +116,23 @@ ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
const struct cfg80211_chan_def *compat)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
lockdep_assert_held(&local->chanctx_mtx);
- list_for_each_entry(sdata, &ctx->assigned_vifs,
+ list_for_each_entry(link, &ctx->assigned_links,
assigned_chanctx_list) {
- if (sdata->reserved_chanctx != NULL)
+ struct ieee80211_bss_conf *link_conf =
+ link->sdata->vif.link_conf[link->link_id];
+
+ if (link->reserved_chanctx)
continue;
if (!compat)
- compat = &sdata->vif.bss_conf.chandef;
+ compat = &link_conf->chandef;
compat = cfg80211_chandef_compatible(
- &sdata->vif.bss_conf.chandef, compat);
+ &link_conf->chandef, compat);
if (!compat)
break;
}
@@ -157,7 +168,7 @@ ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
if (ieee80211_chanctx_combined_chandef(local, ctx, def))
return true;
- if (!list_empty(&ctx->reserved_vifs) &&
+ if (!list_empty(&ctx->reserved_links) &&
ieee80211_chanctx_reserved_chandef(local, ctx, def))
return true;
@@ -193,13 +204,23 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
return NULL;
}
-static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta)
+static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
+ unsigned int link_id)
{
- enum ieee80211_sta_rx_bandwidth width = ieee80211_sta_cap_rx_bw(sta);
+ enum ieee80211_sta_rx_bandwidth width;
+ struct link_sta_info *link_sta;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+
+ /* no effect if this STA has no presence on this link */
+ if (!link_sta)
+ return NL80211_CHAN_WIDTH_20_NOHT;
+
+ width = ieee80211_sta_cap_rx_bw(link_sta);
switch (width) {
case IEEE80211_STA_RX_BW_20:
- if (sta->sta.deflink.ht_cap.ht_supported)
+ if (link_sta->pub->ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20;
else
return NL80211_CHAN_WIDTH_20_NOHT;
@@ -227,7 +248,8 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta)
}
static enum nl80211_chan_width
-ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
+ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id)
{
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
struct sta_info *sta;
@@ -238,7 +260,7 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
!(sta->sdata->bss && sta->sdata->bss == sdata->bss))
continue;
- max_bw = max(max_bw, ieee80211_get_sta_bw(sta));
+ max_bw = max(max_bw, ieee80211_get_sta_bw(sta, link_id));
}
rcu_read_unlock();
@@ -246,27 +268,28 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
}
static enum nl80211_chan_width
-ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
- struct ieee80211_chanctx_conf *conf)
+ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_chanctx_conf *conf)
{
- struct ieee80211_sub_if_data *sdata;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
+ struct ieee80211_vif *vif = &sdata->vif;
+ int link_id;
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- struct ieee80211_vif *vif = &sdata->vif;
+ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[link_id];
- if (!ieee80211_sdata_running(sdata))
+ if (!link_conf)
continue;
- if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
+ if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
continue;
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
- width = ieee80211_get_max_required_bw(sdata);
+ width = ieee80211_get_max_required_bw(sdata, link_id);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -274,8 +297,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
* point, so take the width from the chandef, but
* account also for TDLS peers
*/
- width = max(vif->bss_conf.chandef.width,
- ieee80211_get_max_required_bw(sdata));
+ width = max(link_conf->chandef.width,
+ ieee80211_get_max_required_bw(sdata, link_id));
break;
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
@@ -283,7 +306,7 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
- width = vif->bss_conf.chandef.width;
+ width = link_conf->chandef.width;
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_UNSPECIFIED:
@@ -293,12 +316,36 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
case NL80211_IFTYPE_P2P_GO:
WARN_ON_ONCE(1);
}
+
+ max_bw = max(max_bw, width);
+ }
+
+ return max_bw;
+}
+
+static enum nl80211_chan_width
+ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct ieee80211_sub_if_data *sdata;
+ enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ enum nl80211_chan_width width;
+
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ width = ieee80211_get_chanctx_vif_max_required_bw(sdata, conf);
+
max_bw = max(max_bw, width);
}
/* use the configured bandwidth in case of monitor interface */
sdata = rcu_dereference(local->monitor_sdata);
- if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
+ if (sdata &&
+ rcu_access_pointer(sdata->vif.link_conf[0]->chanctx_conf) == conf)
max_bw = max(max_bw, conf->def.width);
rcu_read_unlock();
@@ -350,7 +397,7 @@ static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
}
/* calling this function is assuming that station vif is updated to
- * lates changes by calling ieee80211_vif_update_chandef
+ * latest changes by calling ieee80211_link_update_chandef
*/
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
@@ -363,29 +410,43 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list,
list) {
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
enum ieee80211_sta_rx_bandwidth new_sta_bw;
+ unsigned int link_id;
if (!ieee80211_sdata_running(sta->sdata))
continue;
- if (rcu_access_pointer(sta->sdata->vif.chanctx_conf) !=
- &ctx->conf)
- continue;
+ for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) {
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[link_id];
+ struct link_sta_info *link_sta;
- new_sta_bw = ieee80211_sta_cur_vht_bw(sta);
+ if (!link_conf)
+ continue;
- /* nothing change */
- if (new_sta_bw == sta->sta.deflink.bandwidth)
- continue;
+ if (rcu_access_pointer(link_conf->chanctx_conf) != &ctx->conf)
+ continue;
- /* vif changed to narrow BW and narrow BW for station wasn't
- * requested or vise versa */
- if ((new_sta_bw < sta->sta.deflink.bandwidth) == !narrowed)
- continue;
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ continue;
+
+ new_sta_bw = ieee80211_sta_cur_vht_bw(link_sta);
+
+ /* nothing changed */
+ if (new_sta_bw == link_sta->pub->bandwidth)
+ continue;
- sta->sta.deflink.bandwidth = new_sta_bw;
- rate_control_rate_update(local, sband, sta,
- IEEE80211_RC_BW_CHANGED);
+ /* vif changed to narrow BW and narrow BW for station wasn't
+ * requested or vice versa */
+ if ((new_sta_bw < link_sta->pub->bandwidth) == !narrowed)
+ continue;
+
+ link_sta->pub->bandwidth = new_sta_bw;
+ rate_control_rate_update(local, sband, sta, link_id,
+ IEEE80211_RC_BW_CHANGED);
+ }
}
rcu_read_unlock();
}
@@ -508,9 +569,14 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (sdata->radar_required) {
- rcu_read_unlock();
- return true;
+ unsigned int link_id;
+
+ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ if (sdata->link[link_id] &&
+ sdata->link[link_id]->radar_required) {
+ rcu_read_unlock();
+ return true;
+ }
}
}
rcu_read_unlock();
@@ -531,15 +597,27 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local,
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ unsigned int link_id;
+
if (!ieee80211_sdata_running(sdata))
continue;
- if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
- continue;
- if (!sdata->radar_required)
- continue;
+ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[link_id];
- required = true;
- break;
+ if (!link_conf)
+ continue;
+
+ if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+ continue;
+ if (!sdata->link[link_id]->radar_required)
+ continue;
+ required = true;
+ break;
+ }
+
+ if (required)
+ break;
}
rcu_read_unlock();
@@ -559,8 +637,8 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
if (!ctx)
return NULL;
- INIT_LIST_HEAD(&ctx->assigned_vifs);
- INIT_LIST_HEAD(&ctx->reserved_vifs);
+ INIT_LIST_HEAD(&ctx->assigned_links);
+ INIT_LIST_HEAD(&ctx->reserved_links);
ctx->conf.def = *chandef;
ctx->conf.rx_chains_static = 1;
ctx->conf.rx_chains_dynamic = 1;
@@ -686,21 +764,32 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ int link_id;
if (!ieee80211_sdata_running(sdata))
continue;
- if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
- continue;
+
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
- if (!compat)
- compat = &sdata->vif.bss_conf.chandef;
+ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[link_id];
- compat = cfg80211_chandef_compatible(
- &sdata->vif.bss_conf.chandef, compat);
- if (WARN_ON_ONCE(!compat))
- break;
+ if (!link_conf)
+ continue;
+
+ if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+ continue;
+
+ if (!compat)
+ compat = &link_conf->chandef;
+
+ compat = cfg80211_chandef_compatible(&link_conf->chandef,
+ compat);
+ if (WARN_ON_ONCE(!compat))
+ break;
+ }
}
/* TDLS peers can sometimes affect the chandef width */
@@ -748,9 +837,11 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}
-static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_chanctx *new_ctx)
+static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
+ struct ieee80211_chanctx *new_ctx)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *curr_ctx = NULL;
@@ -759,31 +850,31 @@ static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN))
return -ENOTSUPP;
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(sdata->vif.link_conf[link_id]->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (conf) {
curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
- drv_unassign_vif_chanctx(local, sdata, curr_ctx);
+ drv_unassign_vif_chanctx(local, sdata, link_id, curr_ctx);
conf = NULL;
- list_del(&sdata->assigned_chanctx_list);
+ list_del(&link->assigned_chanctx_list);
}
if (new_ctx) {
- ret = drv_assign_vif_chanctx(local, sdata, new_ctx);
+ ret = drv_assign_vif_chanctx(local, sdata, link_id, new_ctx);
if (ret)
goto out;
conf = &new_ctx->conf;
- list_add(&sdata->assigned_chanctx_list,
- &new_ctx->assigned_vifs);
+ list_add(&link->assigned_chanctx_list,
+ &new_ctx->assigned_links);
}
out:
- rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+ rcu_assign_pointer(sdata->vif.link_conf[link_id]->chanctx_conf, conf);
- sdata->vif.bss_conf.idle = !conf;
+ sdata->vif.cfg.idle = !conf;
if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
ieee80211_recalc_chanctx_chantype(local, curr_ctx);
@@ -799,8 +890,7 @@ out:
if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
sdata->vif.type != NL80211_IFTYPE_MONITOR)
- ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_IDLE);
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
ieee80211_check_fast_xmit_iface(sdata);
@@ -821,60 +911,64 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
u8 needed_static, needed_dynamic;
+ unsigned int link_id;
if (!ieee80211_sdata_running(sdata))
continue;
- if (rcu_access_pointer(sdata->vif.chanctx_conf) !=
- &chanctx->conf)
- continue;
-
switch (sdata->vif.type) {
- case NL80211_IFTYPE_P2P_DEVICE:
- case NL80211_IFTYPE_NAN:
- continue;
case NL80211_IFTYPE_STATION:
if (!sdata->u.mgd.associated)
continue;
break;
- case NL80211_IFTYPE_AP_VLAN:
- continue;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
break;
default:
- WARN_ON_ONCE(1);
+ continue;
}
- switch (sdata->smps_mode) {
- default:
- WARN_ONCE(1, "Invalid SMPS mode %d\n",
- sdata->smps_mode);
- fallthrough;
- case IEEE80211_SMPS_OFF:
- needed_static = sdata->needed_rx_chains;
- needed_dynamic = sdata->needed_rx_chains;
- break;
- case IEEE80211_SMPS_DYNAMIC:
- needed_static = 1;
- needed_dynamic = sdata->needed_rx_chains;
- break;
- case IEEE80211_SMPS_STATIC:
- needed_static = 1;
- needed_dynamic = 1;
- break;
- }
+ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ struct ieee80211_link_data *link = sdata->link[link_id];
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[link_id];
- rx_chains_static = max(rx_chains_static, needed_static);
- rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
+ if (!link_conf)
+ continue;
+
+ if (rcu_access_pointer(link_conf->chanctx_conf) != &chanctx->conf)
+ continue;
+
+ switch (link->smps_mode) {
+ default:
+ WARN_ONCE(1, "Invalid SMPS mode %d\n",
+ link->smps_mode);
+ fallthrough;
+ case IEEE80211_SMPS_OFF:
+ needed_static = link->needed_rx_chains;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ needed_static = 1;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ needed_static = 1;
+ needed_dynamic = 1;
+ break;
+ }
+
+ rx_chains_static = max(rx_chains_static, needed_static);
+ rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
+ }
}
/* Disable SMPS for the monitor interface */
sdata = rcu_dereference(local->monitor_sdata);
if (sdata &&
- rcu_access_pointer(sdata->vif.chanctx_conf) == &chanctx->conf)
+ rcu_access_pointer(sdata->vif.link_conf[0]->chanctx_conf) == &chanctx->conf)
rx_chains_dynamic = rx_chains_static = local->rx_chains;
rcu_read_unlock();
@@ -899,9 +993,12 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
}
static void
-__ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
- bool clear)
+__ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
+ bool clear)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
struct ieee80211_local *local __maybe_unused = sdata->local;
struct ieee80211_sub_if_data *vlan;
struct ieee80211_chanctx_conf *conf;
@@ -917,7 +1014,7 @@ __ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
* channel context pointer for a while, possibly pointing
* to a channel context that has already been freed.
*/
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(link_conf->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
WARN_ON(!conf);
@@ -925,32 +1022,34 @@ __ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
conf = NULL;
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
- rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
+ rcu_assign_pointer(vlan->vif.link_conf[link_id]->chanctx_conf,
+ conf);
}
-void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
- bool clear)
+void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
+ bool clear)
{
- struct ieee80211_local *local = sdata->local;
+ struct ieee80211_local *local = link->sdata->local;
mutex_lock(&local->chanctx_mtx);
- __ieee80211_vif_copy_chanctx_to_vlans(sdata, clear);
+ __ieee80211_link_copy_chanctx_to_vlans(link, clear);
mutex_unlock(&local->chanctx_mtx);
}
-int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata)
+int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
{
- struct ieee80211_chanctx *ctx = sdata->reserved_chanctx;
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_chanctx *ctx = link->reserved_chanctx;
lockdep_assert_held(&sdata->local->chanctx_mtx);
if (WARN_ON(!ctx))
return -EINVAL;
- list_del(&sdata->reserved_chanctx_list);
- sdata->reserved_chanctx = NULL;
+ list_del(&link->reserved_chanctx_list);
+ link->reserved_chanctx = NULL;
if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) {
if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) {
@@ -975,17 +1074,18 @@ int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata)
return 0;
}
-int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode,
- bool radar_required)
+int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
+ const struct cfg80211_chan_def *chandef,
+ enum ieee80211_chanctx_mode mode,
+ bool radar_required)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx;
lockdep_assert_held(&local->chanctx_mtx);
- curr_ctx = ieee80211_vif_get_chanctx(sdata);
+ curr_ctx = ieee80211_link_get_chanctx(link);
if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx)
return -ENOTSUPP;
@@ -999,11 +1099,11 @@ int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
if (!curr_ctx ||
(curr_ctx->replace_state ==
IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
- !list_empty(&curr_ctx->reserved_vifs)) {
+ !list_empty(&curr_ctx->reserved_links)) {
/*
- * Another vif already requested this context
+ * Another link already requested this context
* for a reservation. Find another one hoping
- * all vifs assigned to it will also switch
+ * all links assigned to it will also switch
* soon enough.
*
* TODO: This needs a little more work as some
@@ -1012,13 +1112,13 @@ int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
* provided some channel context juggling was
* performed.
*
- * Consider ctx1..3, vif1..6, each ctx has 2
- * vifs. vif1 and vif2 from ctx1 request new
+ * Consider ctx1..3, link1..6, each ctx has 2
+ * links. link1 and link2 from ctx1 request new
* different chandefs starting 2 in-place
* reservations with ctx4 and ctx5 replacing
- * ctx1 and ctx2 respectively. Next vif5 and
- * vif6 from ctx3 reserve ctx4. If vif3 and
- * vif4 remain on ctx2 as they are then this
+ * ctx1 and ctx2 respectively. Next link5 and
+ * link6 from ctx3 reserve ctx4. If link3 and
+ * link4 remain on ctx2 as they are then this
* fails unless `replace_ctx` from ctx5 is
* replaced with ctx3.
*/
@@ -1028,7 +1128,7 @@ int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
IEEE80211_CHANCTX_REPLACE_NONE)
continue;
- if (!list_empty(&ctx->reserved_vifs))
+ if (!list_empty(&ctx->reserved_links))
continue;
curr_ctx = ctx;
@@ -1043,7 +1143,7 @@ int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
if (!curr_ctx ||
(curr_ctx->replace_state ==
IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
- !list_empty(&curr_ctx->reserved_vifs))
+ !list_empty(&curr_ctx->reserved_links))
return -EBUSY;
new_ctx = ieee80211_alloc_chanctx(local, chandef, mode);
@@ -1062,25 +1162,27 @@ int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
}
}
- list_add(&sdata->reserved_chanctx_list, &new_ctx->reserved_vifs);
- sdata->reserved_chanctx = new_ctx;
- sdata->reserved_chandef = *chandef;
- sdata->reserved_radar_required = radar_required;
- sdata->reserved_ready = false;
+ list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links);
+ link->reserved_chanctx = new_ctx;
+ link->reserved_chandef = *chandef;
+ link->reserved_radar_required = radar_required;
+ link->reserved_ready = false;
return 0;
}
static void
-ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
+ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
ieee80211_queue_work(&sdata->local->hw,
- &sdata->csa_finalize_work);
+ &link->csa_finalize_work);
break;
case NL80211_IFTYPE_STATION:
ieee80211_queue_work(&sdata->local->hw,
@@ -1101,23 +1203,28 @@ ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
}
static void
-ieee80211_vif_update_chandef(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef)
+ieee80211_link_update_chandef(struct ieee80211_link_data *link,
+ const struct cfg80211_chan_def *chandef)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
struct ieee80211_sub_if_data *vlan;
- sdata->vif.bss_conf.chandef = *chandef;
+ sdata->vif.link_conf[link_id]->chandef = *chandef;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
- vlan->vif.bss_conf.chandef = *chandef;
+ vlan->vif.link_conf[link_id]->chandef = *chandef;
}
static int
-ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
+ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
struct ieee80211_local *local = sdata->local;
struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
struct ieee80211_chanctx *old_ctx, *new_ctx;
@@ -1128,10 +1235,10 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&local->mtx);
lockdep_assert_held(&local->chanctx_mtx);
- new_ctx = sdata->reserved_chanctx;
- old_ctx = ieee80211_vif_get_chanctx(sdata);
+ new_ctx = link->reserved_chanctx;
+ old_ctx = ieee80211_link_get_chanctx(link);
- if (WARN_ON(!sdata->reserved_ready))
+ if (WARN_ON(!link->reserved_ready))
return -EBUSY;
if (WARN_ON(!new_ctx))
@@ -1145,23 +1252,24 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
return -EINVAL;
chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &sdata->reserved_chandef);
+ &link->reserved_chandef);
if (WARN_ON(!chandef))
return -EINVAL;
- if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
+ if (link_conf->chandef.width != link->reserved_chandef.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
+ ieee80211_link_update_chandef(link, &link->reserved_chandef);
ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef);
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
vif_chsw[0].new_ctx = &new_ctx->conf;
+ vif_chsw[0].link_id = link->link_id;
- list_del(&sdata->reserved_chanctx_list);
- sdata->reserved_chanctx = NULL;
+ list_del(&link->reserved_chanctx_list);
+ link->reserved_chanctx = NULL;
err = drv_switch_vif_chanctx(local, vif_chsw, 1,
CHANCTX_SWMODE_REASSIGN_VIF);
@@ -1172,11 +1280,11 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
goto out;
}
- list_move(&sdata->assigned_chanctx_list, &new_ctx->assigned_vifs);
- rcu_assign_pointer(sdata->vif.chanctx_conf, &new_ctx->conf);
+ list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links);
+ rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf);
if (sdata->vif.type == NL80211_IFTYPE_AP)
- __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
+ __ieee80211_link_copy_chanctx_to_vlans(link, false);
ieee80211_check_fast_xmit_iface(sdata);
@@ -1188,25 +1296,27 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
ieee80211_recalc_radar_chanctx(local, new_ctx);
if (changed)
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, link_id, changed);
out:
- ieee80211_vif_chanctx_reservation_complete(sdata);
+ ieee80211_link_chanctx_reservation_complete(link);
return err;
}
static int
-ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
+ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *old_ctx, *new_ctx;
const struct cfg80211_chan_def *chandef;
int err;
- old_ctx = ieee80211_vif_get_chanctx(sdata);
- new_ctx = sdata->reserved_chanctx;
+ old_ctx = ieee80211_vif_get_chanctx(sdata, link_id);
+ new_ctx = link->reserved_chanctx;
- if (WARN_ON(!sdata->reserved_ready))
+ if (WARN_ON(!link->reserved_ready))
return -EINVAL;
if (WARN_ON(old_ctx))
@@ -1220,16 +1330,16 @@ ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
return -EINVAL;
chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &sdata->reserved_chandef);
+ &link->reserved_chandef);
if (WARN_ON(!chandef))
return -EINVAL;
ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef);
- list_del(&sdata->reserved_chanctx_list);
- sdata->reserved_chanctx = NULL;
+ list_del(&link->reserved_chanctx_list);
+ link->reserved_chanctx = NULL;
- err = ieee80211_assign_vif_chanctx(sdata, new_ctx);
+ err = ieee80211_assign_link_chanctx(link, new_ctx);
if (err) {
if (ieee80211_chanctx_refcount(local, new_ctx) == 0)
ieee80211_free_chanctx(local, new_ctx);
@@ -1238,19 +1348,20 @@ ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
}
out:
- ieee80211_vif_chanctx_reservation_complete(sdata);
+ ieee80211_link_chanctx_reservation_complete(link);
return err;
}
static bool
-ieee80211_vif_has_in_place_reservation(struct ieee80211_sub_if_data *sdata)
+ieee80211_link_has_in_place_reservation(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_chanctx *old_ctx, *new_ctx;
lockdep_assert_held(&sdata->local->chanctx_mtx);
- new_ctx = sdata->reserved_chanctx;
- old_ctx = ieee80211_vif_get_chanctx(sdata);
+ new_ctx = link->reserved_chanctx;
+ old_ctx = ieee80211_link_get_chanctx(link);
if (!old_ctx)
return false;
@@ -1290,7 +1401,7 @@ static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local,
int n_vifs)
{
struct ieee80211_vif_chanctx_switch *vif_chsw;
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
struct ieee80211_chanctx *ctx, *old_ctx;
int i, err;
@@ -1311,16 +1422,16 @@ static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local,
goto out;
}
- list_for_each_entry(sdata, &ctx->reserved_vifs,
+ list_for_each_entry(link, &ctx->reserved_links,
reserved_chanctx_list) {
- if (!ieee80211_vif_has_in_place_reservation(
- sdata))
+ if (!ieee80211_link_has_in_place_reservation(link))
continue;
- old_ctx = ieee80211_vif_get_chanctx(sdata);
- vif_chsw[i].vif = &sdata->vif;
+ old_ctx = ieee80211_link_get_chanctx(link);
+ vif_chsw[i].vif = &link->sdata->vif;
vif_chsw[i].old_ctx = &old_ctx->conf;
vif_chsw[i].new_ctx = &ctx->conf;
+ vif_chsw[i].link_id = link->link_id;
i++;
}
@@ -1346,7 +1457,7 @@ static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local)
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
- if (!list_empty(&ctx->replace_ctx->assigned_vifs))
+ if (!list_empty(&ctx->replace_ctx->assigned_links))
continue;
ieee80211_del_chanctx(local, ctx->replace_ctx);
@@ -1363,7 +1474,7 @@ err:
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
- if (!list_empty(&ctx->replace_ctx->assigned_vifs))
+ if (!list_empty(&ctx->replace_ctx->assigned_links))
continue;
ieee80211_del_chanctx(local, ctx);
@@ -1375,7 +1486,6 @@ err:
static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata, *sdata_tmp;
struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
struct ieee80211_chanctx *new_ctx = NULL;
int err, n_assigned, n_reserved, n_ready;
@@ -1401,6 +1511,8 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
*/
list_for_each_entry(ctx, &local->chanctx_list, list) {
+ struct ieee80211_link_data *link;
+
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
@@ -1418,12 +1530,12 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
n_reserved = 0;
n_ready = 0;
- list_for_each_entry(sdata, &ctx->replace_ctx->assigned_vifs,
+ list_for_each_entry(link, &ctx->replace_ctx->assigned_links,
assigned_chanctx_list) {
n_assigned++;
- if (sdata->reserved_chanctx) {
+ if (link->reserved_chanctx) {
n_reserved++;
- if (sdata->reserved_ready)
+ if (link->reserved_ready)
n_ready++;
}
}
@@ -1440,13 +1552,13 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
}
ctx->conf.radar_enabled = false;
- list_for_each_entry(sdata, &ctx->reserved_vifs,
+ list_for_each_entry(link, &ctx->reserved_links,
reserved_chanctx_list) {
- if (ieee80211_vif_has_in_place_reservation(sdata) &&
- !sdata->reserved_ready)
+ if (ieee80211_link_has_in_place_reservation(link) &&
+ !link->reserved_ready)
return -EAGAIN;
- old_ctx = ieee80211_vif_get_chanctx(sdata);
+ old_ctx = ieee80211_link_get_chanctx(link);
if (old_ctx) {
if (old_ctx->replace_state ==
IEEE80211_CHANCTX_WILL_BE_REPLACED)
@@ -1457,7 +1569,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
n_vifs_ctxless++;
}
- if (sdata->reserved_radar_required)
+ if (link->reserved_radar_required)
ctx->conf.radar_enabled = true;
}
}
@@ -1500,6 +1612,8 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
* context(s).
*/
list_for_each_entry(ctx, &local->chanctx_list, list) {
+ struct ieee80211_link_data *link, *link_tmp;
+
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
@@ -1508,31 +1622,35 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
goto err;
}
- list_for_each_entry(sdata, &ctx->reserved_vifs,
+ list_for_each_entry(link, &ctx->reserved_links,
reserved_chanctx_list) {
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_bss_conf *link_conf =
+ sdata->vif.link_conf[link->link_id];
u32 changed = 0;
- if (!ieee80211_vif_has_in_place_reservation(sdata))
+ if (!ieee80211_link_has_in_place_reservation(link))
continue;
- rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
+ rcu_assign_pointer(link_conf->chanctx_conf,
+ &ctx->conf);
if (sdata->vif.type == NL80211_IFTYPE_AP)
- __ieee80211_vif_copy_chanctx_to_vlans(sdata,
- false);
+ __ieee80211_link_copy_chanctx_to_vlans(link,
+ false);
ieee80211_check_fast_xmit_iface(sdata);
- sdata->radar_required = sdata->reserved_radar_required;
+ link->radar_required = link->reserved_radar_required;
- if (sdata->vif.bss_conf.chandef.width !=
- sdata->reserved_chandef.width)
+ if (link_conf->chandef.width != link->reserved_chandef.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
+ ieee80211_link_update_chandef(link, &link->reserved_chandef);
if (changed)
- ieee80211_bss_info_change_notify(sdata,
- changed);
+ ieee80211_link_info_change_notify(sdata,
+ link->link_id,
+ changed);
ieee80211_recalc_txpower(sdata, false);
}
@@ -1542,17 +1660,17 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
ieee80211_recalc_radar_chanctx(local, ctx);
ieee80211_recalc_chanctx_min_def(local, ctx);
- list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
+ list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
reserved_chanctx_list) {
- if (ieee80211_vif_get_chanctx(sdata) != ctx)
+ if (ieee80211_link_get_chanctx(link) != ctx)
continue;
- list_del(&sdata->reserved_chanctx_list);
- list_move(&sdata->assigned_chanctx_list,
- &ctx->assigned_vifs);
- sdata->reserved_chanctx = NULL;
+ list_del(&link->reserved_chanctx_list);
+ list_move(&link->assigned_chanctx_list,
+ &ctx->assigned_links);
+ link->reserved_chanctx = NULL;
- ieee80211_vif_chanctx_reservation_complete(sdata);
+ ieee80211_link_chanctx_reservation_complete(link);
}
/*
@@ -1562,31 +1680,29 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
* reservation for the originally requested interface has already
* succeeded at this point.
*/
- list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
+ list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
reserved_chanctx_list) {
- if (WARN_ON(ieee80211_vif_has_in_place_reservation(
- sdata)))
+ if (WARN_ON(ieee80211_link_has_in_place_reservation(link)))
continue;
- if (WARN_ON(sdata->reserved_chanctx != ctx))
+ if (WARN_ON(link->reserved_chanctx != ctx))
continue;
- if (!sdata->reserved_ready)
+ if (!link->reserved_ready)
continue;
- if (ieee80211_vif_get_chanctx(sdata))
- err = ieee80211_vif_use_reserved_reassign(
- sdata);
+ if (ieee80211_link_get_chanctx(link))
+ err = ieee80211_link_use_reserved_reassign(link);
else
- err = ieee80211_vif_use_reserved_assign(sdata);
+ err = ieee80211_link_use_reserved_assign(link);
if (err) {
- sdata_info(sdata,
- "failed to finalize (re-)assign reservation (err=%d)\n",
- err);
- ieee80211_vif_unreserve_chanctx(sdata);
+ link_info(link,
+ "failed to finalize (re-)assign reservation (err=%d)\n",
+ err);
+ ieee80211_link_unreserve_chanctx(link);
cfg80211_stop_iface(local->hw.wiphy,
- &sdata->wdev,
+ &link->sdata->wdev,
GFP_KERNEL);
}
}
@@ -1612,21 +1728,26 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
err:
list_for_each_entry(ctx, &local->chanctx_list, list) {
+ struct ieee80211_link_data *link, *link_tmp;
+
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
- list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
+ list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
reserved_chanctx_list) {
- ieee80211_vif_unreserve_chanctx(sdata);
- ieee80211_vif_chanctx_reservation_complete(sdata);
+ ieee80211_link_unreserve_chanctx(link);
+ ieee80211_link_chanctx_reservation_complete(link);
}
}
return err;
}
-static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
+static void __ieee80211_link_release_channel(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
@@ -1634,38 +1755,38 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&local->chanctx_mtx);
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(link_conf->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (!conf)
return;
ctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (sdata->reserved_chanctx) {
- if (sdata->reserved_chanctx->replace_state ==
- IEEE80211_CHANCTX_REPLACES_OTHER &&
- ieee80211_chanctx_num_reserved(local,
- sdata->reserved_chanctx) > 1)
+ if (link->reserved_chanctx) {
+ if (link->reserved_chanctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER &&
+ ieee80211_chanctx_num_reserved(local, link->reserved_chanctx) > 1)
use_reserved_switch = true;
- ieee80211_vif_unreserve_chanctx(sdata);
+ ieee80211_link_unreserve_chanctx(link);
}
- ieee80211_assign_vif_chanctx(sdata, NULL);
+ ieee80211_assign_link_chanctx(link, NULL);
if (ieee80211_chanctx_refcount(local, ctx) == 0)
ieee80211_free_chanctx(local, ctx);
- sdata->radar_required = false;
+ link->radar_required = false;
/* Unreserving may ready an in-place reservation. */
if (use_reserved_switch)
ieee80211_vif_use_reserved_switch(local);
}
-int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode)
+int ieee80211_link_use_channel(struct ieee80211_link_data *link,
+ const struct cfg80211_chan_def *chandef,
+ enum ieee80211_chanctx_mode mode)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *ctx;
u8 radar_detect_width = 0;
@@ -1685,14 +1806,14 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
if (ret > 0)
radar_detect_width = BIT(chandef->width);
- sdata->radar_required = ret;
+ sdata->link[link_id]->radar_required = ret;
ret = ieee80211_check_combinations(sdata, chandef, mode,
radar_detect_width);
if (ret < 0)
goto out;
- __ieee80211_vif_release_channel(sdata);
+ __ieee80211_link_release_channel(link);
ctx = ieee80211_find_chanctx(local, chandef, mode);
if (!ctx)
@@ -1702,9 +1823,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
goto out;
}
- ieee80211_vif_update_chandef(sdata, chandef);
+ ieee80211_link_update_chandef(link, chandef);
- ret = ieee80211_assign_vif_chanctx(sdata, ctx);
+ ret = ieee80211_assign_link_chanctx(link, ctx);
if (ret) {
/* if assign fails refcount stays the same */
if (ieee80211_chanctx_refcount(local, ctx) == 0)
@@ -1716,14 +1837,15 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_radar_chanctx(local, ctx);
out:
if (ret)
- sdata->radar_required = false;
+ link->radar_required = false;
mutex_unlock(&local->chanctx_mtx);
return ret;
}
-int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
+int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *new_ctx;
struct ieee80211_chanctx *old_ctx;
@@ -1732,8 +1854,8 @@ int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&local->mtx);
lockdep_assert_held(&local->chanctx_mtx);
- new_ctx = sdata->reserved_chanctx;
- old_ctx = ieee80211_vif_get_chanctx(sdata);
+ new_ctx = link->reserved_chanctx;
+ old_ctx = ieee80211_link_get_chanctx(link);
if (WARN_ON(!new_ctx))
return -EINVAL;
@@ -1742,16 +1864,16 @@ int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
IEEE80211_CHANCTX_WILL_BE_REPLACED))
return -EINVAL;
- if (WARN_ON(sdata->reserved_ready))
+ if (WARN_ON(link->reserved_ready))
return -EINVAL;
- sdata->reserved_ready = true;
+ link->reserved_ready = true;
if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) {
if (old_ctx)
- return ieee80211_vif_use_reserved_reassign(sdata);
+ return ieee80211_link_use_reserved_reassign(link);
- return ieee80211_vif_use_reserved_assign(sdata);
+ return ieee80211_link_use_reserved_assign(link);
}
/*
@@ -1783,10 +1905,13 @@ int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
return 0;
}
-int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef,
- u32 *changed)
+int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
@@ -1798,18 +1923,18 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
return -EINVAL;
mutex_lock(&local->chanctx_mtx);
- if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) {
+ if (cfg80211_chandef_identical(chandef, &link_conf->chandef)) {
ret = 0;
goto out;
}
if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
+ link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
ret = -EINVAL;
goto out;
}
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(link_conf->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (!conf) {
ret = -EINVAL;
@@ -1844,7 +1969,7 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
break;
}
- ieee80211_vif_update_chandef(sdata, chandef);
+ ieee80211_link_update_chandef(link, chandef);
ieee80211_recalc_chanctx_chantype(local, ctx);
@@ -1855,19 +1980,24 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
return ret;
}
-void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
+void ieee80211_link_release_channel(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+
WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
lockdep_assert_held(&sdata->local->mtx);
mutex_lock(&sdata->local->chanctx_mtx);
- __ieee80211_vif_release_channel(sdata);
+ __ieee80211_link_release_channel(link);
mutex_unlock(&sdata->local->chanctx_mtx);
}
-void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
+void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *ap;
struct ieee80211_chanctx_conf *conf;
@@ -1879,9 +2009,9 @@ void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
mutex_lock(&local->chanctx_mtx);
- conf = rcu_dereference_protected(ap->vif.chanctx_conf,
+ conf = rcu_dereference_protected(ap->vif.link_conf[link_id]->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
- rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+ rcu_assign_pointer(link_conf->chanctx_conf, conf);
mutex_unlock(&local->chanctx_mtx);
}
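
The chan.c conversion above replaces the single per-vif chanctx_conf pointer with a lookup through vif.link_conf[link_id], skipping links that are not set up. A minimal user-space sketch of that iteration pattern follows; the toy_* types and MAX_LINKS value are illustrative stand-ins, not the kernel structures.

/*
 * Toy model of the per-link channel-context lookup the patch
 * introduces; MAX_LINKS and the struct names are simplifications.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_LINKS 15

struct toy_chanctx { int id; };

struct toy_link_conf {
	struct toy_chanctx *chanctx;	/* NULL when the link has no context */
};

struct toy_vif {
	struct toy_link_conf *link_conf[MAX_LINKS];	/* NULL for unused links */
};

/* Returns 1 if any active link of @vif is bound to @ctx, mirroring how
 * the converted loops now check link_conf[link_id]->chanctx_conf. */
static int vif_uses_chanctx(const struct toy_vif *vif,
			    const struct toy_chanctx *ctx)
{
	for (size_t i = 0; i < MAX_LINKS; i++) {
		if (!vif->link_conf[i])
			continue;
		if (vif->link_conf[i]->chanctx == ctx)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct toy_chanctx ctx = { .id = 1 };
	struct toy_link_conf link0 = { .chanctx = &ctx };
	struct toy_vif vif = { .link_conf = { [0] = &link0 } };

	printf("uses ctx: %d\n", vif_uses_chanctx(&vif, &ctx));
	return 0;
}
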
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index d90a8f9cc3fd..3302e8da0314 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions
+ * Copyright (C) 2022 Intel Corporation
+ */
#ifndef __MAC80211_DEBUG_H
#define __MAC80211_DEBUG_H
#include <net/cfg80211.h>
@@ -130,6 +134,16 @@ do { \
#define sdata_dbg(sdata, fmt, ...) \
_sdata_dbg(1, sdata, fmt, ##__VA_ARGS__)
+#define link_info(link, fmt, ...) \
+ _sdata_info((link)->sdata, "[link %d] " fmt, (link)->link_id, \
+ ##__VA_ARGS__)
+#define link_err(link, fmt, ...) \
+ _sdata_err((link)->sdata, "[link %d] " fmt, (link)->link_id, \
+ ##__VA_ARGS__)
+#define link_dbg(link, fmt, ...) \
+ _sdata_dbg(1, (link)->sdata, "[link %d] " fmt, (link)->link_id, \
+ ##__VA_ARGS__)
+
#define ht_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_HT_DEBUG, \
sdata, fmt, ##__VA_ARGS__)
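
The new link_info()/link_err()/link_dbg() macros prefix every message with the link ID before handing it to the existing sdata helpers, as the chan.c hunk above already does with link_info(). A small sketch of the same variadic-macro pattern, assuming a toy link structure and a printf backend rather than the kernel logging helpers:

/* Minimal model of the "[link %d] " prefixing done by the new
 * link_info()/link_dbg() macros; toy_link and toy_info() are
 * stand-ins, not kernel APIs. */
#include <stdio.h>

struct toy_link {
	int link_id;
};

#define toy_info(fmt, ...) printf(fmt, ##__VA_ARGS__)

#define toy_link_info(link, fmt, ...) \
	toy_info("[link %d] " fmt, (link)->link_id, ##__VA_ARGS__)

int main(void)
{
	struct toy_link link = { .link_id = 2 };

	toy_link_info(&link, "failed to finalize reservation (err=%d)\n", -16);
	return 0;
}
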
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1fe43b264d75..4d4341249759 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -201,6 +201,36 @@ static const struct file_operations airtime_flags_ops = {
.llseek = default_llseek,
};
+static ssize_t aql_pending_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ char buf[400];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf),
+ "AC AQL pending\n"
+ "VO %u us\n"
+ "VI %u us\n"
+ "BE %u us\n"
+ "BK %u us\n"
+ "total %u us\n",
+ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VO]),
+ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VI]),
+ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BE]),
+ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BK]),
+ atomic_read(&local->aql_total_pending_airtime));
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, len);
+}
+
+static const struct file_operations aql_pending_ops = {
+ .read = aql_pending_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
static ssize_t aql_txq_limit_read(struct file *file,
char __user *user_buf,
size_t count,
@@ -216,14 +246,14 @@ static ssize_t aql_txq_limit_read(struct file *file,
"VI %u %u\n"
"BE %u %u\n"
"BK %u %u\n",
- local->airtime[IEEE80211_AC_VO].aql_txq_limit_low,
- local->airtime[IEEE80211_AC_VO].aql_txq_limit_high,
- local->airtime[IEEE80211_AC_VI].aql_txq_limit_low,
- local->airtime[IEEE80211_AC_VI].aql_txq_limit_high,
- local->airtime[IEEE80211_AC_BE].aql_txq_limit_low,
- local->airtime[IEEE80211_AC_BE].aql_txq_limit_high,
- local->airtime[IEEE80211_AC_BK].aql_txq_limit_low,
- local->airtime[IEEE80211_AC_BK].aql_txq_limit_high);
+ local->aql_txq_limit_low[IEEE80211_AC_VO],
+ local->aql_txq_limit_high[IEEE80211_AC_VO],
+ local->aql_txq_limit_low[IEEE80211_AC_VI],
+ local->aql_txq_limit_high[IEEE80211_AC_VI],
+ local->aql_txq_limit_low[IEEE80211_AC_BE],
+ local->aql_txq_limit_high[IEEE80211_AC_BE],
+ local->aql_txq_limit_low[IEEE80211_AC_BK],
+ local->aql_txq_limit_high[IEEE80211_AC_BK]);
return simple_read_from_buffer(user_buf, count, ppos,
buf, len);
}
@@ -255,11 +285,11 @@ static ssize_t aql_txq_limit_write(struct file *file,
if (ac >= IEEE80211_NUM_ACS)
return -EINVAL;
- q_limit_low_old = local->airtime[ac].aql_txq_limit_low;
- q_limit_high_old = local->airtime[ac].aql_txq_limit_high;
+ q_limit_low_old = local->aql_txq_limit_low[ac];
+ q_limit_high_old = local->aql_txq_limit_high[ac];
- local->airtime[ac].aql_txq_limit_low = q_limit_low;
- local->airtime[ac].aql_txq_limit_high = q_limit_high;
+ local->aql_txq_limit_low[ac] = q_limit_low;
+ local->aql_txq_limit_high[ac] = q_limit_high;
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
@@ -382,46 +412,6 @@ static const struct file_operations force_tx_status_ops = {
.llseek = default_llseek,
};
-static ssize_t airtime_read(struct file *file,
- char __user *user_buf,
- size_t count,
- loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- char buf[200];
- u64 v_t[IEEE80211_NUM_ACS];
- u64 wt[IEEE80211_NUM_ACS];
- int len = 0, ac;
-
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- spin_lock_bh(&local->airtime[ac].lock);
- v_t[ac] = local->airtime[ac].v_t;
- wt[ac] = local->airtime[ac].weight_sum;
- spin_unlock_bh(&local->airtime[ac].lock);
- }
- len = scnprintf(buf, sizeof(buf),
- "\tVO VI BE BK\n"
- "Virt-t\t%-10llu %-10llu %-10llu %-10llu\n"
- "Weight\t%-10llu %-10llu %-10llu %-10llu\n",
- v_t[0],
- v_t[1],
- v_t[2],
- v_t[3],
- wt[0],
- wt[1],
- wt[2],
- wt[3]);
-
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, len);
-}
-
-static const struct file_operations airtime_ops = {
- .read = airtime_read,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
#ifdef CONFIG_PM
static ssize_t reset_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -671,15 +661,12 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(hw_conf);
DEBUGFS_ADD_MODE(force_tx_status, 0600);
DEBUGFS_ADD_MODE(aql_enable, 0600);
+ DEBUGFS_ADD(aql_pending);
if (local->ops->wake_tx_queue)
DEBUGFS_ADD_MODE(aqm, 0600);
- if (wiphy_ext_feature_isset(local->hw.wiphy,
- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
- DEBUGFS_ADD_MODE(airtime, 0600);
- DEBUGFS_ADD_MODE(airtime_flags, 0600);
- }
+ DEBUGFS_ADD_MODE(airtime_flags, 0600);
DEBUGFS_ADD(aql_txq_limit);
debugfs_create_u32("aql_threshold", 0600,
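
aql_pending_read() above takes a one-shot snapshot of the per-AC pending-airtime counters, renders them into a bounded buffer, and returns it via simple_read_from_buffer(). A hedged stand-alone sketch of that snapshot-and-format step, with invented counter values and plain snprintf() in place of the kernel helpers:

/* Sketch of the snapshot-and-format pattern behind aql_pending_read():
 * read each per-AC counter once, then render a small fixed table.
 * The ac_name[] list and pending[] values are illustrative only. */
#include <stdio.h>

#define NUM_ACS 4

int main(void)
{
	static const char *ac_name[NUM_ACS] = { "VO", "VI", "BE", "BK" };
	unsigned int pending[NUM_ACS] = { 120, 80, 4096, 0 };
	unsigned int total = 0;
	char buf[400];
	int len = 0;

	len += snprintf(buf + len, sizeof(buf) - len, "AC   AQL pending\n");
	for (int ac = 0; ac < NUM_ACS; ac++) {
		total += pending[ac];
		len += snprintf(buf + len, sizeof(buf) - len,
				"%s   %u us\n", ac_name[ac], pending[ac]);
	}
	len += snprintf(buf + len, sizeof(buf) - len, "total %u us\n", total);

	fputs(buf, stdout);
	return 0;
}
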
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index edc7792e1361..16a04330e7dc 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -4,7 +4,7 @@
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright (C) 2015 Intel Deutschland GmbH
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/kobject.h>
@@ -395,9 +395,9 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
debugfs_remove(sdata->debugfs.default_multicast_key);
sdata->debugfs.default_multicast_key = NULL;
- if (sdata->default_multicast_key) {
+ if (sdata->deflink.default_multicast_key) {
key = key_mtx_dereference(sdata->local,
- sdata->default_multicast_key);
+ sdata->deflink.default_multicast_key);
sprintf(buf, "../keys/%d", key->debugfs.cnt);
sdata->debugfs.default_multicast_key =
debugfs_create_symlink("default_multicast_key",
@@ -414,7 +414,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
return;
key = key_mtx_dereference(sdata->local,
- sdata->default_mgmt_key);
+ sdata->deflink.default_mgmt_key);
if (key) {
sprintf(buf, "../keys/%d", key->debugfs.cnt);
sdata->debugfs.default_mgmt_key =
@@ -443,7 +443,7 @@ ieee80211_debugfs_key_add_beacon_default(struct ieee80211_sub_if_data *sdata)
return;
key = key_mtx_dereference(sdata->local,
- sdata->default_beacon_key);
+ sdata->deflink.default_beacon_key);
if (key) {
sprintf(buf, "../keys/%d", key->debugfs.cnt);
sdata->debugfs.default_beacon_key =
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cf71484658c6..ead917501d6c 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#include <linux/kernel.h>
@@ -208,8 +208,8 @@ IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
IEEE80211_IF_FILE(flags, flags, HEX);
IEEE80211_IF_FILE(state, state, LHEX);
IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
-IEEE80211_IF_FILE(ap_power_level, ap_power_level, DEC);
-IEEE80211_IF_FILE(user_power_level, user_power_level, DEC);
+IEEE80211_IF_FILE(ap_power_level, deflink.ap_power_level, DEC);
+IEEE80211_IF_FILE(user_power_level, deflink.user_power_level, DEC);
static ssize_t
ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata,
@@ -232,8 +232,8 @@ ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata,
IEEE80211_IF_FILE_R(hw_queues);
/* STA attributes */
-IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
-IEEE80211_IF_FILE(aid, vif.bss_conf.aid, DEC);
+IEEE80211_IF_FILE(bssid, deflink.u.mgd.bssid, MAC);
+IEEE80211_IF_FILE(aid, vif.cfg.aid, DEC);
IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS);
static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
@@ -256,7 +256,7 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
return -EOPNOTSUPP;
sdata_lock(sdata);
- err = __ieee80211_request_smps_mgd(sdata, smps_mode);
+ err = __ieee80211_request_smps_mgd(sdata, 0, smps_mode);
sdata_unlock(sdata);
return err;
@@ -274,8 +274,8 @@ static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
{
if (sdata->vif.type == NL80211_IFTYPE_STATION)
return snprintf(buf, buflen, "request: %s\nused: %s\n",
- smps_modes[sdata->u.mgd.req_smps],
- smps_modes[sdata->smps_mode]);
+ smps_modes[sdata->deflink.u.mgd.req_smps],
+ smps_modes[sdata->deflink.smps_mode]);
return -EINVAL;
}
@@ -337,7 +337,7 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
dev_kfree_skb(skb);
return -ENOTCONN;
}
- memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, addr, ETH_ALEN);
sdata_unlock(sdata);
@@ -366,7 +366,7 @@ IEEE80211_IF_FILE_W(tkip_mic_test);
static ssize_t ieee80211_if_parse_beacon_loss(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
- if (!ieee80211_sdata_running(sdata) || !sdata->vif.bss_conf.assoc)
+ if (!ieee80211_sdata_running(sdata) || !sdata->vif.cfg.assoc)
return -ENOTCONN;
ieee80211_beacon_loss(&sdata->vif);
@@ -510,34 +510,6 @@ static ssize_t ieee80211_if_fmt_aqm(
}
IEEE80211_IF_FILE_R(aqm);
-static ssize_t ieee80211_if_fmt_airtime(
- const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_txq *txq = sdata->vif.txq;
- struct airtime_info *air_info;
- int len;
-
- if (!txq)
- return 0;
-
- spin_lock_bh(&local->airtime[txq->ac].lock);
- air_info = to_airtime_info(txq);
- len = scnprintf(buf,
- buflen,
- "RX: %llu us\nTX: %llu us\nWeight: %u\n"
- "Virt-T: %lld us\n",
- air_info->rx_airtime,
- air_info->tx_airtime,
- air_info->weight,
- air_info->v_t);
- spin_unlock_bh(&local->airtime[txq->ac].lock);
-
- return len;
-}
-
-IEEE80211_IF_FILE_R(airtime);
-
IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
/* IBSS attributes */
@@ -683,10 +655,8 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
if (sdata->local->ops->wake_tx_queue &&
sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
- sdata->vif.type != NL80211_IFTYPE_NAN) {
+ sdata->vif.type != NL80211_IFTYPE_NAN)
DEBUGFS_ADD(aqm);
- DEBUGFS_ADD(airtime);
- }
}
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 182094be9001..d3397c1248d3 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
size_t bufsz = 400;
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
u64 rx_airtime = 0, tx_airtime = 0;
- u64 v_t[IEEE80211_NUM_ACS];
+ s32 deficit[IEEE80211_NUM_ACS];
ssize_t rv;
int ac;
@@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
return -ENOMEM;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- spin_lock_bh(&local->airtime[ac].lock);
+ spin_lock_bh(&local->active_txq_lock[ac]);
rx_airtime += sta->airtime[ac].rx_airtime;
tx_airtime += sta->airtime[ac].tx_airtime;
- v_t[ac] = sta->airtime[ac].v_t;
- spin_unlock_bh(&local->airtime[ac].lock);
+ deficit[ac] = sta->airtime[ac].deficit;
+ spin_unlock_bh(&local->active_txq_lock[ac]);
}
p += scnprintf(p, bufsz + buf - p,
"RX: %llu us\nTX: %llu us\nWeight: %u\n"
- "Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
- rx_airtime, tx_airtime, sta->airtime[0].weight,
- v_t[0], v_t[1], v_t[2], v_t[3]);
+ "Deficit: VO: %d us VI: %d us BE: %d us BK: %d us\n",
+ rx_airtime, tx_airtime, sta->airtime_weight,
+ deficit[0], deficit[1], deficit[2], deficit[3]);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
@@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
int ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- spin_lock_bh(&local->airtime[ac].lock);
+ spin_lock_bh(&local->active_txq_lock[ac]);
sta->airtime[ac].rx_airtime = 0;
sta->airtime[ac].tx_airtime = 0;
- sta->airtime[ac].v_t = 0;
- spin_unlock_bh(&local->airtime[ac].lock);
+ sta->airtime[ac].deficit = sta->airtime_weight;
+ spin_unlock_bh(&local->active_txq_lock[ac]);
}
return count;
@@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file *file, char __user *userbuf,
return -ENOMEM;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- spin_lock_bh(&local->airtime[ac].lock);
+ spin_lock_bh(&local->active_txq_lock[ac]);
q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
- spin_unlock_bh(&local->airtime[ac].lock);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
}
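
The sta_airtime changes above drop the per-AC virtual-time fields and report a per-AC deficit again, taken under active_txq_lock and reset to the station's airtime weight. A toy sketch of that deficit bookkeeping, with invented names and no locking:

/* Toy model of the deficit bookkeeping the airtime debugfs now
 * reports: each AC's deficit is reset to the station weight and
 * decremented by consumed airtime; a negative deficit means the
 * station must wait for a refill.  All names are illustrative. */
#include <stdio.h>

#define NUM_ACS 4

struct toy_sta {
	unsigned int weight;
	int deficit[NUM_ACS];
};

static void toy_reset_deficit(struct toy_sta *sta)
{
	for (int ac = 0; ac < NUM_ACS; ac++)
		sta->deficit[ac] = sta->weight;
}

static int toy_charge_airtime(struct toy_sta *sta, int ac, int airtime_us)
{
	sta->deficit[ac] -= airtime_us;
	return sta->deficit[ac] >= 0;	/* still allowed to transmit */
}

int main(void)
{
	struct toy_sta sta = { .weight = 256 };
	int ok;

	toy_reset_deficit(&sta);
	ok = toy_charge_airtime(&sta, 0, 300);
	printf("VO ok: %d, deficit %d\n", ok, sta.deficit[0]);
	return 0;
}
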
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4e2fc1a08681..db38c8cc9d8f 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -147,10 +147,27 @@ static inline int drv_config(struct ieee80211_local *local, u32 changed)
return ret;
}
-static inline void drv_bss_info_changed(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_bss_conf *info,
- u32 changed)
+static inline void drv_vif_cfg_changed(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u64 changed)
+{
+ might_sleep();
+
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_vif_cfg_changed(local, sdata, changed);
+ if (local->ops->vif_cfg_changed)
+ local->ops->vif_cfg_changed(&local->hw, &sdata->vif, changed);
+ else if (local->ops->bss_info_changed)
+ local->ops->bss_info_changed(&local->hw, &sdata->vif,
+ &sdata->vif.bss_conf, changed);
+ trace_drv_return_void(local);
+}
+
+static inline void drv_link_info_changed(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ int link_id, u64 changed)
{
might_sleep();
@@ -165,16 +182,20 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
sdata->vif.type == NL80211_IFTYPE_NAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
- !sdata->vif.mu_mimo_owner &&
+ !sdata->vif.bss_conf.mu_mimo_owner &&
!(changed & BSS_CHANGED_TXPOWER))))
return;
if (!check_sdata_in_driver(sdata))
return;
- trace_drv_bss_info_changed(local, sdata, info, changed);
- if (local->ops->bss_info_changed)
- local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
+ trace_drv_link_info_changed(local, sdata, link_id, changed);
+ if (local->ops->link_info_changed)
+ local->ops->link_info_changed(&local->hw, &sdata->vif,
+ link_id, changed);
+ else if (local->ops->bss_info_changed)
+ local->ops->bss_info_changed(&local->hw, &sdata->vif,
+ &sdata->vif.bss_conf, changed);
trace_drv_return_void(local);
}
@@ -917,6 +938,7 @@ static inline void drv_change_chanctx(struct ieee80211_local *local,
static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
struct ieee80211_chanctx *ctx)
{
int ret = 0;
@@ -924,11 +946,12 @@ static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- trace_drv_assign_vif_chanctx(local, sdata, ctx);
+ trace_drv_assign_vif_chanctx(local, sdata, link_id, ctx);
if (local->ops->assign_vif_chanctx) {
WARN_ON_ONCE(!ctx->driver_present);
ret = local->ops->assign_vif_chanctx(&local->hw,
&sdata->vif,
+ link_id,
&ctx->conf);
}
trace_drv_return_int(local, ret);
@@ -938,6 +961,7 @@ static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
struct ieee80211_chanctx *ctx)
{
might_sleep();
@@ -945,11 +969,12 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
- trace_drv_unassign_vif_chanctx(local, sdata, ctx);
+ trace_drv_unassign_vif_chanctx(local, sdata, link_id, ctx);
if (local->ops->unassign_vif_chanctx) {
WARN_ON_ONCE(!ctx->driver_present);
local->ops->unassign_vif_chanctx(&local->hw,
&sdata->vif,
+ link_id,
&ctx->conf);
}
trace_drv_return_void(local);
@@ -960,7 +985,8 @@ int drv_switch_vif_chanctx(struct ieee80211_local *local,
int n_vifs, enum ieee80211_chanctx_switch_mode mode);
static inline int drv_start_ap(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id)
{
int ret = 0;
@@ -969,22 +995,24 @@ static inline int drv_start_ap(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf);
+ trace_drv_start_ap(local, sdata, sdata->vif.link_conf[link_id],
+ link_id);
if (local->ops->start_ap)
- ret = local->ops->start_ap(&local->hw, &sdata->vif);
+ ret = local->ops->start_ap(&local->hw, &sdata->vif, link_id);
trace_drv_return_int(local, ret);
return ret;
}
static inline void drv_stop_ap(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id)
{
if (!check_sdata_in_driver(sdata))
return;
- trace_drv_stop_ap(local, sdata);
+ trace_drv_stop_ap(local, sdata, link_id);
if (local->ops->stop_ap)
- local->ops->stop_ap(&local->hw, &sdata->vif);
+ local->ops->stop_ap(&local->hw, &sdata->vif, link_id);
trace_drv_return_void(local);
}
@@ -1508,4 +1536,46 @@ static inline int drv_net_fill_forward_path(struct ieee80211_local *local,
return ret;
}
+static inline int drv_change_vif_links(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_change_vif_links(local, sdata, old_links, new_links);
+ if (local->ops->change_vif_links)
+ ret = local->ops->change_vif_links(&local->hw, &sdata->vif,
+ old_links, new_links, old);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline int drv_change_sta_links(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_change_sta_links(local, sdata, sta, old_links, new_links);
+ if (local->ops->change_sta_links)
+ ret = local->ops->change_sta_links(&local->hw, &sdata->vif, sta,
+ old_links, new_links);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
#endif /* __MAC80211_DRIVER_OPS */
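
drv_vif_cfg_changed() and drv_link_info_changed() above prefer the new split callbacks and fall back to the legacy bss_info_changed() op when a driver does not provide them. A minimal sketch of that dispatch pattern, using a toy ops table rather than the mac80211 one:

/* Sketch of the "new op if present, else legacy op" dispatch used by
 * drv_vif_cfg_changed()/drv_link_info_changed().  The toy_ops struct
 * and callbacks are stand-ins, not the mac80211 ops table. */
#include <stdio.h>

struct toy_ops {
	void (*link_info_changed)(int link_id, unsigned long changed);
	void (*bss_info_changed)(unsigned long changed);
};

static void legacy_bss_info_changed(unsigned long changed)
{
	printf("legacy bss_info_changed: 0x%lx\n", changed);
}

static void notify_link_change(const struct toy_ops *ops,
			       int link_id, unsigned long changed)
{
	if (ops->link_info_changed)
		ops->link_info_changed(link_id, changed);
	else if (ops->bss_info_changed)
		ops->bss_info_changed(changed);	/* pre-MLO drivers */
}

int main(void)
{
	struct toy_ops old_driver = { .bss_info_changed = legacy_bss_info_changed };

	notify_link_change(&old_driver, 0, 0x1UL /* arbitrary example change bit */);
	return 0;
}
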
diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c
index 96c9486bf2fe..31e20a342f21 100644
--- a/net/mac80211/eht.c
+++ b/net/mac80211/eht.c
@@ -12,9 +12,10 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const u8 *he_cap_ie, u8 he_cap_len,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
- u8 eht_cap_len, struct sta_info *sta)
+ u8 eht_cap_len,
+ struct link_sta_info *link_sta)
{
- struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.deflink.eht_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = &link_sta->pub->eht_cap;
struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
u8 eht_ppe_size = 0;
u8 mcs_nss_size;
@@ -71,6 +72,6 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
eht_cap->has_eht = true;
- sta->deflink.cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
- sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+ link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
+ link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
}
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 31cd3c1ac07f..c2b38370bfb1 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -5,7 +5,7 @@
* Copied from cfg.c - originally
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2014 Intel Corporation (Author: Johannes Berg)
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2022 Intel Corporation
*/
#include <linux/types.h>
#include <net/cfg80211.h>
@@ -83,17 +83,17 @@ static void ieee80211_get_stats(struct net_device *dev,
#define ADD_STA_STATS(sta) \
do { \
- data[i++] += sta->rx_stats.packets; \
- data[i++] += sta->rx_stats.bytes; \
- data[i++] += sta->rx_stats.num_duplicates; \
- data[i++] += sta->rx_stats.fragments; \
- data[i++] += sta->rx_stats.dropped; \
+ data[i++] += (sta)->rx_stats.packets; \
+ data[i++] += (sta)->rx_stats.bytes; \
+ data[i++] += (sta)->rx_stats.num_duplicates; \
+ data[i++] += (sta)->rx_stats.fragments; \
+ data[i++] += (sta)->rx_stats.dropped; \
\
data[i++] += sinfo.tx_packets; \
data[i++] += sinfo.tx_bytes; \
- data[i++] += sta->status_stats.filtered; \
- data[i++] += sta->status_stats.retry_failed; \
- data[i++] += sta->status_stats.retry_count; \
+ data[i++] += (sta)->status_stats.filtered; \
+ data[i++] += (sta)->status_stats.retry_failed; \
+ data[i++] += (sta)->status_stats.retry_count; \
} while (0)
/* For Managed stations, find the single station based on BSSID
@@ -105,7 +105,7 @@ static void ieee80211_get_stats(struct net_device *dev,
mutex_lock(&local->sta_mtx);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
+ sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid);
if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
goto do_survey;
@@ -114,7 +114,7 @@ static void ieee80211_get_stats(struct net_device *dev,
sta_set_sinfo(sta, &sinfo, false);
i = 0;
- ADD_STA_STATS(sta->link[0]);
+ ADD_STA_STATS(&sta->deflink);
data[i++] = sta->sta_state;
@@ -140,7 +140,7 @@ static void ieee80211_get_stats(struct net_device *dev,
memset(&sinfo, 0, sizeof(sinfo));
sta_set_sinfo(sta, &sinfo, false);
i = 0;
- ADD_STA_STATS(sta->link[0]);
+ ADD_STA_STATS(&sta->deflink);
}
}
@@ -150,7 +150,7 @@ do_survey:
survey.filled = 0;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (chanctx_conf)
channel = chanctx_conf->def.chan;
else
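
One detail in the ethtool.c hunk above: ADD_STA_STATS() is now invoked with the expression &sta->deflink rather than a plain pointer variable, which is why its parameter gains parentheses before the -> operator is applied. A minimal stand-alone C illustration of that point (the struct names below are invented for the demo, not taken from mac80211):

#include <stdio.h>

struct rx_stats { unsigned long packets; };
struct link_info { struct rx_stats rx_stats; };
struct sta_info_demo { struct link_info deflink; };

/*
 * Without the parentheses, x->rx_stats.packets invoked as
 * COUNT(&s.deflink) would expand to &s.deflink->rx_stats.packets,
 * which fails to compile because deflink is not a pointer.
 */
#define COUNT(x) ((x)->rx_stats.packets)

int main(void)
{
	struct sta_info_demo s = { .deflink.rx_stats.packets = 42 };

	printf("%lu\n", COUNT(&s.deflink));	/* prints 42 */
	return 0;
}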
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index 1a61f7552edd..d9228fd3f77a 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -3,15 +3,16 @@
* HE handling
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 - 2020 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
*/
#include "ieee80211_i.h"
static void
ieee80211_update_from_he_6ghz_capa(const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
- struct sta_info *sta)
+ struct link_sta_info *link_sta)
{
+ struct sta_info *sta = link_sta->sta;
enum ieee80211_smps_mode smps_mode;
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -49,7 +50,7 @@ ieee80211_update_from_he_6ghz_capa(const struct ieee80211_he_6ghz_capa *he_6ghz_
break;
}
- sta->sta.deflink.he_6ghz_capa = *he_6ghz_capa;
+ link_sta->pub->he_6ghz_capa = *he_6ghz_capa;
}
static void ieee80211_he_mcs_disable(__le16 *he_mcs)
@@ -108,9 +109,9 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const u8 *he_cap_ie, u8 he_cap_len,
const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
- struct sta_info *sta)
+ struct link_sta_info *link_sta)
{
- struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
struct ieee80211_sta_he_cap own_he_cap;
struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
u8 he_ppe_size;
@@ -153,11 +154,11 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
he_cap->has_he = true;
- sta->deflink.cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
- sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+ link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
+ link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
if (sband->band == NL80211_BAND_6GHZ && he_6ghz_capa)
- ieee80211_update_from_he_6ghz_capa(he_6ghz_capa, sta);
+ ieee80211_update_from_he_6ghz_capa(he_6ghz_capa, link_sta);
ieee80211_he_mcs_intersection(&own_he_cap.he_mcs_nss_supp.rx_mcs_80,
&he_cap->he_mcs_nss_supp.rx_mcs_80,
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 171bd16b13f3..2eb3a409b70f 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2021 Intel Corporation
+ * Copyright(c) 2020-2022 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -138,8 +138,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_ht_cap *ht_cap_ie,
- struct sta_info *sta)
+ struct link_sta_info *link_sta)
{
+ struct sta_info *sta = link_sta->sta;
struct ieee80211_sta_ht_cap ht_cap, own_cap;
u8 ampdu_info, tx_mcs_set_cap;
int i, max_tx_streams;
@@ -243,11 +244,11 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839;
apply:
- changed = memcmp(&sta->sta.deflink.ht_cap, &ht_cap, sizeof(ht_cap));
+ changed = memcmp(&link_sta->pub->ht_cap, &ht_cap, sizeof(ht_cap));
- memcpy(&sta->sta.deflink.ht_cap, &ht_cap, sizeof(ht_cap));
+ memcpy(&link_sta->pub->ht_cap, &ht_cap, sizeof(ht_cap));
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.link_conf[link_sta->link_id]->chandef.width) {
default:
WARN_ON_ONCE(1);
fallthrough;
@@ -264,9 +265,9 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
break;
}
- sta->sta.deflink.bandwidth = bw;
+ link_sta->pub->bandwidth = bw;
- sta->deflink.cur_max_bandwidth =
+ link_sta->cur_max_bandwidth =
ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
@@ -433,7 +434,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
@@ -541,29 +542,33 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
void ieee80211_request_smps_mgd_work(struct work_struct *work)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
+ struct ieee80211_link_data *link =
+ container_of(work, struct ieee80211_link_data,
u.mgd.request_smps_work);
- sdata_lock(sdata);
- __ieee80211_request_smps_mgd(sdata, sdata->u.mgd.driver_smps_mode);
- sdata_unlock(sdata);
+ sdata_lock(link->sdata);
+ __ieee80211_request_smps_mgd(link->sdata, link->link_id,
+ link->u.mgd.driver_smps_mode);
+ sdata_unlock(link->sdata);
}
-void ieee80211_request_smps(struct ieee80211_vif *vif,
+void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link = sdata->link[link_id];
if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
return;
- if (sdata->u.mgd.driver_smps_mode == smps_mode)
+ if (WARN_ON(!link))
return;
- sdata->u.mgd.driver_smps_mode = smps_mode;
- ieee80211_queue_work(&sdata->local->hw,
- &sdata->u.mgd.request_smps_work);
+ if (link->u.mgd.driver_smps_mode == smps_mode)
+ return;
+
+ link->u.mgd.driver_smps_mode = smps_mode;
+ ieee80211_queue_work(&sdata->local->hw, &link->u.mgd.request_smps_work);
}
/* this might change ... don't want non-open drivers using it */
EXPORT_SYMBOL_GPL(ieee80211_request_smps);
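
With the SMPS state moved into the per-link structure, ieee80211_request_smps() gains a link_id argument. A hedged sketch of how a driver might call the new API; the wrapper and its name are hypothetical, only the exported call, its arguments and the WARN on non-station interfaces come from the patch:

/* hypothetical driver helper, not part of this patch */
static void drv_request_static_smps(struct ieee80211_vif *vif,
				    unsigned int link_id)
{
	/* mac80211 WARNs and returns if vif->type != NL80211_IFTYPE_STATION */
	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	/* queues link->u.mgd.request_smps_work for this link only */
	ieee80211_request_smps(vif, link_id, IEEE80211_SMPS_STATIC);
}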
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 14c04fd48b7a..65b6255c6747 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -9,7 +9,7 @@
* Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018-2021 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
*/
#include <linux/delay.h>
@@ -244,9 +244,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sta_info_flush(sdata);
/* if merging, indicate to driver that we leave the old IBSS */
- if (sdata->vif.bss_conf.ibss_joined) {
- sdata->vif.bss_conf.ibss_joined = false;
- sdata->vif.bss_conf.ibss_creator = false;
+ if (sdata->vif.cfg.ibss_joined) {
+ sdata->vif.cfg.ibss_joined = false;
+ sdata->vif.cfg.ibss_creator = false;
sdata->vif.bss_conf.enable_beacon = false;
netif_carrier_off(sdata->dev);
ieee80211_bss_info_change_notify(sdata,
@@ -301,15 +301,15 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
radar_required = err;
mutex_lock(&local->mtx);
- if (ieee80211_vif_use_channel(sdata, &chandef,
- ifibss->fixed_channel ?
+ if (ieee80211_link_use_channel(sdata->link[0], &chandef,
+ ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
IEEE80211_CHANCTX_EXCLUSIVE)) {
sdata_info(sdata, "Failed to join IBSS, no channel context\n");
mutex_unlock(&local->mtx);
return;
}
- sdata->radar_required = radar_required;
+ sdata->deflink.radar_required = radar_required;
mutex_unlock(&local->mtx);
memcpy(ifibss->bssid, bssid, ETH_ALEN);
@@ -326,8 +326,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.enable_beacon = true;
sdata->vif.bss_conf.beacon_int = beacon_int;
sdata->vif.bss_conf.basic_rates = basic_rates;
- sdata->vif.bss_conf.ssid_len = ifibss->ssid_len;
- memcpy(sdata->vif.bss_conf.ssid, ifibss->ssid, ifibss->ssid_len);
+ sdata->vif.cfg.ssid_len = ifibss->ssid_len;
+ memcpy(sdata->vif.cfg.ssid, ifibss->ssid, ifibss->ssid_len);
bss_change = BSS_CHANGED_BEACON_INT;
bss_change |= ieee80211_reset_erp_info(sdata);
bss_change |= BSS_CHANGED_BSSID;
@@ -359,19 +359,19 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
ieee80211_set_wmm_default(sdata, true, false);
- sdata->vif.bss_conf.ibss_joined = true;
- sdata->vif.bss_conf.ibss_creator = creator;
+ sdata->vif.cfg.ibss_joined = true;
+ sdata->vif.cfg.ibss_creator = creator;
err = drv_join_ibss(local, sdata);
if (err) {
- sdata->vif.bss_conf.ibss_joined = false;
- sdata->vif.bss_conf.ibss_creator = false;
+ sdata->vif.cfg.ibss_joined = false;
+ sdata->vif.cfg.ibss_creator = false;
sdata->vif.bss_conf.enable_beacon = false;
- sdata->vif.bss_conf.ssid_len = 0;
+ sdata->vif.cfg.ssid_len = 0;
RCU_INIT_POINTER(ifibss->presp, NULL);
kfree_rcu(presp, rcu_head);
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
mutex_unlock(&local->mtx);
sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n",
err);
@@ -544,12 +544,12 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
IEEE80211_PRIVACY(ifibss->privacy));
/* XXX: should not really modify cfg80211 data */
if (cbss) {
- cbss->channel = sdata->csa_chandef.chan;
+ cbss->channel = sdata->deflink.csa_chandef.chan;
cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
}
}
- ifibss->chandef = sdata->csa_chandef;
+ ifibss->chandef = sdata->deflink.csa_chandef;
/* generate the beacon */
return ieee80211_ibss_csa_beacon(sdata, NULL);
@@ -622,14 +622,14 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
}
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON_ONCE(!chanctx_conf))
return NULL;
band = chanctx_conf->def.chan->band;
scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
- sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
+ sta = sta_info_alloc(sdata, addr, -1, GFP_KERNEL);
if (!sta) {
rcu_read_lock();
return NULL;
@@ -708,10 +708,10 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
netif_carrier_off(sdata->dev);
- sdata->vif.bss_conf.ibss_joined = false;
- sdata->vif.bss_conf.ibss_creator = false;
+ sdata->vif.cfg.ibss_joined = false;
+ sdata->vif.cfg.ibss_creator = false;
sdata->vif.bss_conf.enable_beacon = false;
- sdata->vif.bss_conf.ssid_len = 0;
+ sdata->vif.cfg.ssid_len = 0;
/* remove beacon */
presp = rcu_dereference_protected(ifibss->presp,
@@ -725,7 +725,7 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
BSS_CHANGED_IBSS);
drv_leave_ibss(local, sdata);
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
mutex_unlock(&local->mtx);
}
@@ -923,7 +923,7 @@ ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata,
if (len < required_len)
return;
- if (!sdata->vif.csa_active)
+ if (!sdata->vif.bss_conf.csa_active)
ieee80211_ibss_process_chanswitch(sdata, elems, false);
}
@@ -1051,7 +1051,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
&htcap_ie,
- sta);
+ &sta->deflink);
if (elems->vht_operation && elems->vht_cap_elem &&
sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20 &&
@@ -1068,7 +1068,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
&chandef);
memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- &cap_ie, sta);
+ &cap_ie,
+ &sta->deflink);
if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
rates_updated |= true;
}
@@ -1143,7 +1144,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
goto put_bss;
/* process channel switch */
- if (sdata->vif.csa_active ||
+ if (sdata->vif.bss_conf.csa_active ||
ieee80211_ibss_process_chanswitch(sdata, elems, true))
goto put_bss;
@@ -1220,7 +1221,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
return;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON_ONCE(!chanctx_conf)) {
rcu_read_unlock();
return;
@@ -1229,7 +1230,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
- sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
+ sta = sta_info_alloc(sdata, addr, -1, GFP_ATOMIC);
if (!sta)
return;
@@ -1851,10 +1852,10 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
| IEEE80211_HT_PARAM_RIFS_MODE;
changed |= BSS_CHANGED_HT | BSS_CHANGED_MCAST_RATE;
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
- sdata->smps_mode = IEEE80211_SMPS_OFF;
- sdata->needed_rx_chains = local->rx_chains;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
+ sdata->deflink.needed_rx_chains = local->rx_chains;
sdata->control_port_over_nl80211 = params->control_port_over_nl80211;
ieee80211_queue_work(&local->hw, &sdata->work);
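
The ibss.c changes show the new split of interface state: fields that describe the interface as a whole (ibss_joined, ibss_creator, the SSID) move from vif.bss_conf into vif.cfg, per-link fields such as enable_beacon stay in bss_conf, and channel-context operations now go through sdata->link[0]. A rough sketch of how the two halves are updated and signalled; the helper below is hypothetical, and pairing BSS_CHANGED_IBSS with the vif-level notifier is an assumption based on the declarations added to ieee80211_i.h later in this patch:

/* hypothetical helper, not taken from the patch */
static void example_ibss_join_notify(struct ieee80211_sub_if_data *sdata)
{
	/* interface-wide state lives in vif.cfg */
	sdata->vif.cfg.ibss_joined = true;
	ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IBSS);

	/* per-link state stays in bss_conf; link 0 is the default link */
	sdata->vif.bss_conf.enable_beacon = true;
	ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_BEACON_ENABLED);
}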
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 86ef0a46a68c..f08060a36ef2 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef IEEE80211_I_H
@@ -83,6 +83,13 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
#define IEEE80211_MAX_NAN_INSTANCE_ID 255
+
+/*
+ * Keep a station's queues on the active list for deficit accounting purposes
+ * if it was active or queued during the last 100ms
+ */
+#define AIRTIME_ACTIVE_DURATION (HZ / 10)
+
struct ieee80211_bss {
u32 device_ts_beacon, device_ts_presp;
@@ -293,19 +300,13 @@ struct ps_data {
};
struct ieee80211_if_ap {
- struct beacon_data __rcu *beacon;
- struct probe_resp __rcu *probe_resp;
- struct fils_discovery_data __rcu *fils_discovery;
- struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp;
-
- /* to be used after channel switch. */
- struct cfg80211_beacon_data *next_beacon;
struct list_head vlans; /* write-protected with RTNL and local->mtx */
struct ps_data ps;
atomic_t num_mcast_sta; /* number of stations receiving multicast */
bool multicast_to_unicast;
+ bool active;
};
struct ieee80211_if_vlan {
@@ -456,29 +457,14 @@ struct ieee80211_if_managed {
reconnect:1,
associated:1;
- struct cfg80211_bss *assoc_bss;
struct ieee80211_mgd_auth_data *auth_data;
struct ieee80211_mgd_assoc_data *assoc_data;
- u8 bssid[ETH_ALEN] __aligned(2);
-
bool powersave; /* powersave requested for this iface */
bool broken_ap; /* AP is broken -- turn off powersave */
- bool have_beacon;
- u8 dtim_period;
- enum ieee80211_smps_mode req_smps, /* requested smps mode */
- driver_smps_mode; /* smps mode request */
-
- struct work_struct request_smps_work;
unsigned int flags;
- bool csa_waiting_bcn;
- bool csa_ignored_same_chan;
-
- bool beacon_crc_valid;
- u32 beacon_crc;
-
bool status_acked;
bool status_received;
__le16 status_fc;
@@ -503,39 +489,14 @@ struct ieee80211_if_managed {
*/
unsigned int uapsd_max_sp_len;
- int wmm_last_param_set;
- int mu_edca_last_param_set;
-
u8 use_4addr;
- s16 p2p_noa_index;
-
- struct ewma_beacon_signal ave_beacon_signal;
-
- /*
- * Number of Beacon frames used in ave_beacon_signal. This can be used
- * to avoid generating less reliable cqm events that would be based
- * only on couple of received frames.
- */
- unsigned int count_beacon_signal;
-
- /* Number of times beacon loss was invoked. */
- unsigned int beacon_loss_count;
-
- /*
- * Last Beacon frame signal strength average (ave_beacon_signal / 16)
- * that triggered a cqm event. 0 indicates that no event has been
- * generated for the current association.
- */
- int last_cqm_event_signal;
-
/*
* State variables for keeping track of RSSI of the AP currently
* connected to and informing driver when RSSI has gone
* below/above a certain threshold.
*/
int rssi_min_thold, rssi_max_thold;
- int last_ave_beacon_signal;
struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
@@ -833,8 +794,8 @@ struct ieee80211_chanctx {
struct list_head list;
struct rcu_head rcu_head;
- struct list_head assigned_vifs;
- struct list_head reserved_vifs;
+ struct list_head assigned_links;
+ struct list_head reserved_links;
enum ieee80211_chanctx_replace_state replace_state;
struct ieee80211_chanctx *replace_ctx;
@@ -864,16 +825,20 @@ enum txq_info_flags {
* @def_flow: used as a fallback flow when a packet destined to @tin hashes to
* a fq_flow which is already owned by a different tin
* @def_cvars: codel vars for @def_flow
- * @schedule_order: used with ieee80211_local->active_txqs
* @frags: used to keep fragments created after dequeue
+ * @schedule_order: used with ieee80211_local->active_txqs
+ * @schedule_round: counter to prevent infinite loops on TXQ scheduling
*/
struct txq_info {
struct fq_tin tin;
struct codel_vars def_cvars;
struct codel_stats cstats;
- struct rb_node schedule_order;
+
+ u16 schedule_round;
+ struct list_head schedule_order;
struct sk_buff_head frags;
+
unsigned long flags;
/* keep last! */
@@ -901,6 +866,101 @@ struct ieee80211_if_nan {
struct idr function_inst_ids;
};
+struct ieee80211_link_data_managed {
+ u8 bssid[ETH_ALEN] __aligned(2);
+
+ u8 dtim_period;
+ enum ieee80211_smps_mode req_smps, /* requested smps mode */
+ driver_smps_mode; /* smps mode request */
+
+ s16 p2p_noa_index;
+
+ bool have_beacon;
+
+ bool csa_waiting_bcn;
+ bool csa_ignored_same_chan;
+
+ struct work_struct request_smps_work;
+ bool beacon_crc_valid;
+ u32 beacon_crc;
+ struct ewma_beacon_signal ave_beacon_signal;
+ int last_ave_beacon_signal;
+
+ /*
+ * Number of Beacon frames used in ave_beacon_signal. This can be used
+ * to avoid generating less reliable cqm events that would be based
+ * only on a couple of received frames.
+ */
+ unsigned int count_beacon_signal;
+
+ /* Number of times beacon loss was invoked. */
+ unsigned int beacon_loss_count;
+
+ /*
+ * Last Beacon frame signal strength average (ave_beacon_signal / 16)
+ * that triggered a cqm event. 0 indicates that no event has been
+ * generated for the current association.
+ */
+ int last_cqm_event_signal;
+
+ int wmm_last_param_set;
+ int mu_edca_last_param_set;
+
+ struct cfg80211_bss *bss;
+};
+
+struct ieee80211_link_data_ap {
+ struct beacon_data __rcu *beacon;
+ struct probe_resp __rcu *probe_resp;
+ struct fils_discovery_data __rcu *fils_discovery;
+ struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp;
+
+ /* to be used after channel switch. */
+ struct cfg80211_beacon_data *next_beacon;
+};
+
+struct ieee80211_link_data {
+ struct ieee80211_sub_if_data *sdata;
+ unsigned int link_id;
+
+ struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
+ struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
+
+ /* multicast keys only */
+ struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS +
+ NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS];
+ struct ieee80211_key __rcu *default_multicast_key;
+ struct ieee80211_key __rcu *default_mgmt_key;
+ struct ieee80211_key __rcu *default_beacon_key;
+
+ struct work_struct csa_finalize_work;
+ bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
+ struct cfg80211_chan_def csa_chandef;
+
+ struct work_struct color_change_finalize_work;
+
+ /* context reservation -- protected with chanctx_mtx */
+ struct ieee80211_chanctx *reserved_chanctx;
+ struct cfg80211_chan_def reserved_chandef;
+ bool reserved_radar_required;
+ bool reserved_ready;
+
+ u8 needed_rx_chains;
+ enum ieee80211_smps_mode smps_mode;
+
+ int user_power_level; /* in dBm */
+ int ap_power_level; /* in dBm */
+
+ bool radar_required;
+ struct delayed_work dfs_cac_timer_work;
+
+ union {
+ struct ieee80211_link_data_managed mgd;
+ struct ieee80211_link_data_ap ap;
+ } u;
+};
+
struct ieee80211_sub_if_data {
struct list_head list;
@@ -931,42 +991,19 @@ struct ieee80211_sub_if_data {
/* bit field of ACM bits (BIT(802.1D tag)) */
u8 wmm_acm;
- struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS +
- NUM_DEFAULT_MGMT_KEYS +
- NUM_DEFAULT_BEACON_KEYS];
+ struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS];
struct ieee80211_key __rcu *default_unicast_key;
- struct ieee80211_key __rcu *default_multicast_key;
- struct ieee80211_key __rcu *default_mgmt_key;
- struct ieee80211_key __rcu *default_beacon_key;
u16 sequence_number;
__be16 control_port_protocol;
bool control_port_no_encrypt;
bool control_port_no_preauth;
bool control_port_over_nl80211;
- int encrypt_headroom;
atomic_t num_tx_queued;
struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
struct mac80211_qos_map __rcu *qos_map;
- struct airtime_info airtime[IEEE80211_NUM_ACS];
-
- struct work_struct csa_finalize_work;
- bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
- struct cfg80211_chan_def csa_chandef;
-
- struct work_struct color_change_finalize_work;
-
- struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
- struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
-
- /* context reservation -- protected with chanctx_mtx */
- struct ieee80211_chanctx *reserved_chanctx;
- struct cfg80211_chan_def reserved_chandef;
- bool reserved_radar_required;
- bool reserved_ready;
-
/* used to reconfigure hardware SM PS */
struct work_struct recalc_smps;
@@ -974,15 +1011,6 @@ struct ieee80211_sub_if_data {
struct sk_buff_head skb_queue;
struct sk_buff_head status_queue;
- u8 needed_rx_chains;
- enum ieee80211_smps_mode smps_mode;
-
- int user_power_level; /* in dBm */
- int ap_power_level; /* in dBm */
-
- bool radar_required;
- struct delayed_work dfs_cac_timer_work;
-
/*
* AP this belongs to: self in AP mode and
* corresponding AP in VLAN mode, NULL for
@@ -1014,6 +1042,9 @@ struct ieee80211_sub_if_data {
struct ieee80211_if_nan nan;
} u;
+ struct ieee80211_link_data deflink;
+ struct ieee80211_link_data *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
#ifdef CONFIG_MAC80211_DEBUGFS
struct {
struct dentry *subdir_stations;
@@ -1077,7 +1108,7 @@ ieee80211_vif_get_shift(struct ieee80211_vif *vif)
int shift = 0;
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (chanctx_conf)
shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
rcu_read_unlock();
@@ -1186,44 +1217,6 @@ enum mac80211_scan_state {
SCAN_ABORT,
};
-/**
- * struct airtime_sched_info - state used for airtime scheduling and AQL
- *
- * @lock: spinlock that protects all the fields in this struct
- * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
- * @schedule_pos: the current position maintained while a driver walks the tree
- * with ieee80211_next_txq()
- * @active_list: list of struct airtime_info structs that were active within
- * the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
- * weight_sum
- * @last_weight_update: used for rate limiting walking active_list
- * @last_schedule_time: tracks the last time a transmission was scheduled; used
- * for catching up v_t if no stations are eligible for
- * transmission.
- * @v_t: global virtual time; queues with v_t < this are eligible for
- * transmission
- * @weight_sum: total sum of all active stations used for dividing airtime
- * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast
- * path - see comment above
- * IEEE80211_RECIPROCAL_DIVISOR_64)
- * @aql_txq_limit_low: AQL limit when total outstanding airtime
- * is < IEEE80211_AQL_THRESHOLD
- * @aql_txq_limit_high: AQL limit when total outstanding airtime
- * is > IEEE80211_AQL_THRESHOLD
- */
-struct airtime_sched_info {
- spinlock_t lock;
- struct rb_root_cached active_txqs;
- struct rb_node *schedule_pos;
- struct list_head active_list;
- u64 last_weight_update;
- u64 last_schedule_activity;
- u64 v_t;
- u64 weight_sum;
- u64 weight_sum_reciprocal;
- u32 aql_txq_limit_low;
- u32 aql_txq_limit_high;
-};
DECLARE_STATIC_KEY_FALSE(aql_disable);
struct ieee80211_local {
@@ -1237,10 +1230,16 @@ struct ieee80211_local {
struct codel_params cparams;
/* protects active_txqs and txqi->schedule_order */
- struct airtime_sched_info airtime[IEEE80211_NUM_ACS];
+ spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
+ struct list_head active_txqs[IEEE80211_NUM_ACS];
+ u16 schedule_round[IEEE80211_NUM_ACS];
+
u16 airtime_flags;
+ u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
+ u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
u32 aql_threshold;
atomic_t aql_total_pending_airtime;
+ atomic_t aql_ac_pending_airtime[IEEE80211_NUM_ACS];
const struct ieee80211_ops *ops;
@@ -1344,6 +1343,7 @@ struct ieee80211_local {
unsigned long num_sta;
struct list_head sta_list;
struct rhltable sta_hash;
+ struct rhltable link_sta_hash;
struct timer_list sta_cleanup;
int sta_generation;
@@ -1528,7 +1528,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
enum nl80211_band band;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
@@ -1660,131 +1660,6 @@ static inline bool txq_has_queue(struct ieee80211_txq *txq)
return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
}
-static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq)
-{
- struct ieee80211_sub_if_data *sdata;
- struct sta_info *sta;
-
- if (txq->sta) {
- sta = container_of(txq->sta, struct sta_info, sta);
- return &sta->airtime[txq->ac];
- }
-
- sdata = vif_to_sdata(txq->vif);
- return &sdata->airtime[txq->ac];
-}
-
-/* To avoid divisions in the fast path, we keep pre-computed reciprocals for
- * airtime weight calculations. There are two different weights to keep track
- * of: The per-station weight and the sum of weights per phy.
- *
- * For the per-station weights (kept in airtime_info below), we use 32-bit
- * reciprocals with a devisor of 2^19. This lets us keep the multiplications and
- * divisions for the station weights as 32-bit operations at the cost of a bit
- * of rounding error for high weights; but the choice of divisor keeps rounding
- * errors <10% for weights <2^15, assuming no more than 8ms of airtime is
- * reported at a time.
- *
- * For the per-phy sum of weights the values can get higher, so we use 64-bit
- * operations for those with a 32-bit divisor, which should avoid any
- * significant rounding errors.
- */
-#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL
-#define IEEE80211_RECIPROCAL_SHIFT_64 32
-#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U
-#define IEEE80211_RECIPROCAL_SHIFT_32 19
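
The comment being removed above describes the reciprocal trick the old airtime scheduler used to avoid divisions in the fast path: a weight w is converted once into floor(2^19 / w), and x / w is later approximated as (x * recip) >> 19. A small stand-alone demonstration of that arithmetic; the values are made up, only the divisor and shift constants mirror the removed defines:

#include <stdio.h>
#include <stdint.h>

#define RECIP_DIVISOR_32 0x80000U	/* 2^19, as in the removed define */
#define RECIP_SHIFT_32	 19

int main(void)
{
	uint32_t weight = 256;		/* example station airtime weight */
	uint32_t recip = RECIP_DIVISOR_32 / weight;	/* precomputed once */
	uint32_t airtime = 8000;	/* example airtime report, in usec */

	/* airtime / weight approximated by one multiply and one shift */
	uint32_t approx =
		(uint32_t)(((uint64_t)airtime * recip) >> RECIP_SHIFT_32);

	printf("exact %u, via reciprocal %u\n", airtime / weight, approx);
	return 0;
}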
-
-static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight)
-{
- if (air_info->weight == weight)
- return;
-
- air_info->weight = weight;
- if (weight) {
- air_info->weight_reciprocal =
- IEEE80211_RECIPROCAL_DIVISOR_32 / weight;
- } else {
- air_info->weight_reciprocal = 0;
- }
-}
-
-static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched,
- int weight_sum)
-{
- if (air_sched->weight_sum == weight_sum)
- return;
-
- air_sched->weight_sum = weight_sum;
- if (air_sched->weight_sum) {
- air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64;
- do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum);
- } else {
- air_sched->weight_sum_reciprocal = 0;
- }
-}
-
-/* A problem when trying to enforce airtime fairness is that we want to divide
- * the airtime between the currently *active* stations. However, basing this on
- * the instantaneous queue state of stations doesn't work, as queues tend to
- * oscillate very quickly between empty and occupied, leading to the scheduler
- * thinking only a single station is active when deciding whether to allow
- * transmission (and thus not throttling correctly).
- *
- * To fix this we use a timer-based notion of activity: a station is considered
- * active if it has been scheduled within the last 100 ms; we keep a separate
- * list of all the stations considered active in this manner, and lazily update
- * the total weight of active stations from this list (filtering the stations in
- * the list by their 'last active' time).
- *
- * We add one additional safeguard to guard against stations that manage to get
- * scheduled every 100 ms but don't transmit a lot of data, and thus don't use
- * up any airtime. Such stations would be able to get priority for an extended
- * period of time if they do start transmitting at full capacity again, and so
- * we add an explicit maximum for how far behind a station is allowed to fall in
- * the virtual airtime domain. This limit is set to a relatively high value of
- * 20 ms because the main mechanism for catching up idle stations is the active
- * state as described above; i.e., the hard limit should only be hit in
- * pathological cases.
- */
-#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC)
-#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */
-
-static inline bool airtime_is_active(struct airtime_info *air_info, u64 now)
-{
- return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION;
-}
-
-static inline void airtime_set_active(struct airtime_sched_info *air_sched,
- struct airtime_info *air_info, u64 now)
-{
- air_info->last_scheduled = now;
- air_sched->last_schedule_activity = now;
- list_move_tail(&air_info->list, &air_sched->active_list);
-}
-
-static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched,
- u64 v_t, u64 now)
-{
- air_sched->v_t = v_t;
- return true;
-}
-
-static inline void init_airtime_info(struct airtime_info *air_info,
- struct airtime_sched_info *air_sched)
-{
- atomic_set(&air_info->aql_tx_pending, 0);
- air_info->aql_limit_low = air_sched->aql_txq_limit_low;
- air_info->aql_limit_high = air_sched->aql_txq_limit_high;
- airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
- INIT_LIST_HEAD(&air_info->list);
-}
-
-static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
-{
- return ether_addr_equal(raddr, addr) ||
- is_broadcast_ether_addr(raddr);
-}
-
static inline bool
ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
{
@@ -1818,7 +1693,11 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
- u32 changed);
+ u64 changed);
+void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata,
+ u64 changed);
+void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ int link_id, u64 changed);
void ieee80211_configure_filter(struct ieee80211_local *local);
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
@@ -1980,6 +1859,9 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
int ieee80211_add_virtual_monitor(struct ieee80211_local *local);
void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
+int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata,
+ u16 new_links);
+
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
bool update_bss);
@@ -2010,7 +1892,6 @@ struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 info_flags);
void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_supported_band *sband,
int retry_count, int shift, bool send_to_cooked,
struct ieee80211_tx_status *status);
@@ -2024,14 +1905,6 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
u64 *cookie);
int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len);
-void ieee80211_resort_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq);
-void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq,
- bool purge);
-void ieee80211_update_airtime_weight(struct ieee80211_local *local,
- struct airtime_sched_info *air_sched,
- u64 now, bool force);
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
@@ -2039,7 +1912,7 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_ht_cap *ht_cap_ie,
- struct sta_info *sta);
+ struct link_sta_info *link_sta);
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code);
@@ -2095,27 +1968,31 @@ void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
- struct sta_info *sta);
-enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
-enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
-void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+ struct link_sta_info *link_sta);
+enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
+enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
+void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
-enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
+enum nl80211_chan_width
+ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta);
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
struct ieee80211_mgmt *mgmt);
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, u8 opmode,
- enum nl80211_band band);
+ struct link_sta_info *sta,
+ u8 opmode, enum nl80211_band band);
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, u8 opmode,
- enum nl80211_band band);
+ struct link_sta_info *sta,
+ u8 opmode, enum nl80211_band band);
void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap);
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
u16 vht_mask[NL80211_VHT_NSS_MAX]);
enum nl80211_chan_width
-ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta);
+ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *sta);
/* HE */
void
@@ -2123,7 +2000,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const u8 *he_cap_ie, u8 he_cap_len,
const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
- struct sta_info *sta);
+ struct link_sta_info *link_sta);
void
ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
const struct ieee80211_he_spr *he_spr_ie_elem);
@@ -2219,23 +2096,8 @@ ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
}
-static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, int tid)
-{
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
- rcu_read_unlock();
- kfree_skb(skb);
- return;
- }
-
- __ieee80211_tx_skb_tid_band(sdata, skb, tid,
- chanctx_conf->def.chan->band);
- rcu_read_unlock();
-}
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, int tid);
static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
@@ -2392,8 +2254,10 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band band, u32 *basic_rates);
int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
enum ieee80211_smps_mode smps_mode);
-void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id);
void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata);
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
@@ -2447,26 +2311,26 @@ bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
int __must_check
-ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode);
+ieee80211_link_use_channel(struct ieee80211_link_data *link,
+ const struct cfg80211_chan_def *chandef,
+ enum ieee80211_chanctx_mode mode);
int __must_check
-ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode,
- bool radar_required);
+ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
+ const struct cfg80211_chan_def *chandef,
+ enum ieee80211_chanctx_mode mode,
+ bool radar_required);
int __must_check
-ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata);
-int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
+ieee80211_link_use_reserved_context(struct ieee80211_link_data *link);
+int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
int __must_check
-ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef,
- u32 *changed);
-void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
-void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
-void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
- bool clear);
+ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed);
+void ieee80211_link_release_channel(struct ieee80211_link_data *link);
+void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link);
+void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
+ bool clear);
int ieee80211_chanctx_refcount(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx);
@@ -2483,14 +2347,6 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work);
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
struct cfg80211_csa_settings *csa_settings);
-bool ieee80211_cs_valid(const struct ieee80211_cipher_scheme *cs);
-bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n);
-const struct ieee80211_cipher_scheme *
-ieee80211_cs_get(struct ieee80211_local *local, u32 cipher,
- enum nl80211_iftype iftype);
-int ieee80211_cs_headroom(struct ieee80211_local *local,
- struct cfg80211_crypto_settings *crypto,
- enum nl80211_iftype iftype);
void ieee80211_recalc_dtim(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
@@ -2555,5 +2411,6 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const u8 *he_cap_ie, u8 he_cap_len,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
- u8 eht_cap_len, struct sta_info *sta);
+ u8 eht_cap_len,
+ struct link_sta_info *link_sta);
#endif /* IEEE80211_I_H */
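
The bulk of the ieee80211_i.h changes introduce struct ieee80211_link_data and move per-link fields (multicast keys, CSA state, chanctx reservations, power levels, SMPS mode, DFS work) out of struct ieee80211_sub_if_data, which now carries a deflink instance plus a link[] pointer array. A hedged sketch of the resulting access pattern; the accessor is hypothetical, the field locations are the ones added above:

/* hypothetical accessor, illustrating sdata->link[] vs. sdata->deflink */
static enum ieee80211_smps_mode
example_link_smps(struct ieee80211_sub_if_data *sdata, unsigned int link_id)
{
	struct ieee80211_link_data *link = sdata->link[link_id];

	if (WARN_ON(!link))		/* link not set up (yet) */
		return IEEE80211_SMPS_OFF;

	/* for non-MLD interfaces, link 0 points at &sdata->deflink */
	return link->smps_mode;
}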
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 15a73b7fdd75..d5e904bff624 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -51,7 +51,7 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
int power;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
return false;
@@ -60,11 +60,11 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
power = ieee80211_chandef_max_power(&chanctx_conf->def);
rcu_read_unlock();
- if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
- power = min(power, sdata->user_power_level);
+ if (sdata->deflink.user_power_level != IEEE80211_UNSET_POWER_LEVEL)
+ power = min(power, sdata->deflink.user_power_level);
- if (sdata->ap_power_level != IEEE80211_UNSET_POWER_LEVEL)
- power = min(power, sdata->ap_power_level);
+ if (sdata->deflink.ap_power_level != IEEE80211_UNSET_POWER_LEVEL)
+ power = min(power, sdata->deflink.ap_power_level);
if (power != sdata->vif.bss_conf.txpower) {
sdata->vif.bss_conf.txpower = power;
@@ -80,7 +80,8 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
{
if (__ieee80211_recalc_txpower(sdata) ||
(update_bss && ieee80211_sdata_running(sdata)))
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
+ ieee80211_link_info_change_notify(sdata, 0,
+ BSS_CHANGED_TXPOWER);
}
static u32 __ieee80211_idle_off(struct ieee80211_local *local)
@@ -219,8 +220,10 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
ret = eth_mac_addr(dev, sa);
- if (ret == 0)
+ if (ret == 0) {
memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
+ ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr);
+ }
return ret;
}
@@ -275,7 +278,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
* will not add another interface while any channel
* switch is active.
*/
- if (nsdata->vif.csa_active)
+ if (nsdata->vif.bss_conf.csa_active)
return -EBUSY;
/*
@@ -450,29 +453,34 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
cancel_work_sync(&local->dynamic_ps_enable_work);
cancel_work_sync(&sdata->recalc_smps);
+
sdata_lock(sdata);
+ WARN(sdata->vif.valid_links,
+ "destroying interface with valid links 0x%04x\n",
+ sdata->vif.valid_links);
+
mutex_lock(&local->mtx);
- sdata->vif.csa_active = false;
+ sdata->vif.bss_conf.csa_active = false;
if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sdata->u.mgd.csa_waiting_bcn = false;
- if (sdata->csa_block_tx) {
+ sdata->deflink.u.mgd.csa_waiting_bcn = false;
+ if (sdata->deflink.csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
+ sdata->deflink.csa_block_tx = false;
}
mutex_unlock(&local->mtx);
sdata_unlock(sdata);
- cancel_work_sync(&sdata->csa_finalize_work);
- cancel_work_sync(&sdata->color_change_finalize_work);
+ cancel_work_sync(&sdata->deflink.csa_finalize_work);
+ cancel_work_sync(&sdata->deflink.color_change_finalize_work);
- cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+ cancel_delayed_work_sync(&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
chandef = sdata->vif.bss_conf.chandef;
WARN_ON(local->suspended);
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
mutex_unlock(&local->mtx);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
@@ -504,7 +512,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
mutex_lock(&local->mtx);
list_del(&sdata->u.vlan.list);
mutex_unlock(&local->mtx);
- RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
+ RCU_INIT_POINTER(sdata->vif.bss_conf.chanctx_conf, NULL);
/* see comment in the default case below */
ieee80211_free_keys(sdata, true);
/* no need to tell driver */
@@ -833,7 +841,7 @@ static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx,
}
}
- sta = sta_info_get(sdata, sdata->u.mgd.bssid);
+ sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
break;
default:
goto out;
@@ -1013,6 +1021,65 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
+static void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+ int link_id,
+ struct ieee80211_link_data *link,
+ struct ieee80211_bss_conf *link_conf)
+{
+ bool deflink = link_id < 0;
+
+ if (link_id < 0)
+ link_id = 0;
+
+ sdata->vif.link_conf[link_id] = link_conf;
+ sdata->link[link_id] = link;
+
+ link->sdata = sdata;
+ link->link_id = link_id;
+
+ INIT_WORK(&link->csa_finalize_work,
+ ieee80211_csa_finalize_work);
+ INIT_WORK(&link->color_change_finalize_work,
+ ieee80211_color_change_finalize_work);
+ INIT_LIST_HEAD(&link->assigned_chanctx_list);
+ INIT_LIST_HEAD(&link->reserved_chanctx_list);
+ INIT_DELAYED_WORK(&link->dfs_cac_timer_work,
+ ieee80211_dfs_cac_timer_work);
+
+ if (!deflink) {
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ ether_addr_copy(link_conf->addr,
+ sdata->wdev.links[link_id].addr);
+ WARN_ON(!(sdata->wdev.valid_links & BIT(link_id)));
+ break;
+ case NL80211_IFTYPE_STATION:
+ eth_random_addr(link_conf->addr);
+ ether_addr_copy(sdata->wdev.links[link_id].addr,
+ link_conf->addr);
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
+}
+
+static void ieee80211_sdata_init(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ sdata->local = local;
+
+ /*
+ * Initialize the default link, so we can use link_id 0 for non-MLD,
+ * and that continues to work for non-MLD-aware drivers that use just
+ * vif.bss_conf instead of vif.link_conf.
+ *
+ * Note that we never change this, so if link ID 0 isn't used in an
+ * MLD connection, we get a separate allocation for it.
+ */
+ ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf);
+}
+
int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
@@ -1032,13 +1099,12 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return -ENOMEM;
/* set up data */
- sdata->local = local;
sdata->vif.type = NL80211_IFTYPE_MONITOR;
snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
wiphy_name(local->hw.wiphy));
sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
- sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
+ ieee80211_sdata_init(local, sdata);
ieee80211_set_default_queues(sdata);
@@ -1062,8 +1128,8 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
mutex_unlock(&local->iflist_mtx);
mutex_lock(&local->mtx);
- ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
+ ret = ieee80211_link_use_channel(sdata->link[0], &local->monitor_chandef,
+ IEEE80211_CHANCTX_EXCLUSIVE);
mutex_unlock(&local->mtx);
if (ret) {
mutex_lock(&local->iflist_mtx);
@@ -1107,7 +1173,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
synchronize_net();
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
mutex_unlock(&local->mtx);
drv_remove_interface(local, sdata);
@@ -1212,8 +1278,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
/* no need to tell driver, but set carrier and chanctx */
- if (rtnl_dereference(sdata->bss->beacon)) {
- ieee80211_vif_vlan_copy_chanctx(sdata);
+ if (sdata->bss->active) {
+ ieee80211_link_vlan_copy_chanctx(sdata->link[0]);
netif_carrier_on(dev);
ieee80211_set_vif_encap_ops(sdata);
} else {
@@ -1285,7 +1351,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
sdata->vif.type != NL80211_IFTYPE_NAN)
changed |= ieee80211_reset_erp_info(sdata);
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
@@ -1461,14 +1527,15 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local,
sta = sta_info_get_bss(sdata, mgmt->sa);
if (sta)
- ieee80211_vht_handle_opmode(sdata, sta, opmode,
- band);
+ ieee80211_vht_handle_opmode(sdata,
+ &sta->deflink,
+ opmode, band);
mutex_unlock(&local->sta_mtx);
break;
}
case WLAN_VHT_ACTION_GROUPID_MGMT:
- ieee80211_process_mu_groups(sdata, mgmt);
+ ieee80211_process_mu_groups(sdata, 0, mgmt);
break;
default:
WARN_ON(1);
@@ -1622,7 +1689,7 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data, recalc_smps);
- ieee80211_recalc_smps(sdata);
+ ieee80211_recalc_smps(sdata, 0);
}
/*
@@ -1634,8 +1701,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
static const u8 bssid_wildcard[ETH_ALEN] = {0xff, 0xff, 0xff,
0xff, 0xff, 0xff};
- /* clear type-dependent union */
+ /* clear type-dependent unions */
memset(&sdata->u, 0, sizeof(sdata->u));
+ memset(&sdata->deflink.u, 0, sizeof(sdata->deflink.u));
/* and set some type-dependent values */
sdata->vif.type = type;
@@ -1646,8 +1714,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->control_port_no_encrypt = false;
sdata->control_port_over_nl80211 = false;
sdata->control_port_no_preauth = false;
- sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
- sdata->vif.bss_conf.idle = true;
+ sdata->vif.cfg.idle = true;
sdata->vif.bss_conf.txpower = INT_MIN; /* unset */
sdata->noack_map = 0;
@@ -1662,10 +1729,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
skb_queue_head_init(&sdata->status_queue);
INIT_WORK(&sdata->work, ieee80211_iface_work);
INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
- INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
- INIT_WORK(&sdata->color_change_finalize_work, ieee80211_color_change_finalize_work);
- INIT_LIST_HEAD(&sdata->assigned_chanctx_list);
- INIT_LIST_HEAD(&sdata->reserved_chanctx_list);
switch (type) {
case NL80211_IFTYPE_P2P_GO:
@@ -1684,7 +1747,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->vif.p2p = true;
fallthrough;
case NL80211_IFTYPE_STATION:
- sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
+ sdata->vif.bss_conf.bssid = sdata->deflink.u.mgd.bssid;
ieee80211_sta_setup_sdata(sdata);
break;
case NL80211_IFTYPE_OCB:
@@ -1737,6 +1800,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
if (!local->ops->change_interface)
return -EBUSY;
+ /* for now, don't support changing while links exist */
+ if (sdata->vif.valid_links)
+ return -EBUSY;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
if (!list_empty(&sdata->u.ap.vlans))
@@ -1999,6 +2066,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
strlcpy(sdata->name, name, IFNAMSIZ);
ieee80211_assign_perm_addr(local, wdev->address, type);
memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
+ ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr);
} else {
int size = ALIGN(sizeof(*sdata) + local->hw.vif_data_size,
sizeof(void *));
@@ -2063,6 +2131,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata = netdev_priv(ndev);
ndev->ieee80211_ptr = &sdata->wdev;
memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+ ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr);
memcpy(sdata->name, ndev->name, IFNAMSIZ);
if (txq_size) {
@@ -2075,14 +2144,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
/* initialise type-independent data */
sdata->wdev.wiphy = local->hw.wiphy;
- sdata->local = local;
+
+ ieee80211_sdata_init(local, sdata);
ieee80211_init_frag_cache(&sdata->frags);
INIT_LIST_HEAD(&sdata->key_list);
- INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
- ieee80211_dfs_cac_timer_work);
INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
ieee80211_delayed_tailroom_dec);
@@ -2110,15 +2178,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
}
}
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- init_airtime_info(&sdata->airtime[i], &local->airtime[i]);
-
ieee80211_set_default_queues(sdata);
- sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
- sdata->user_power_level = local->user_power_level;
-
- sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
+ sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
+ sdata->deflink.user_power_level = local->user_power_level;
/* setup type-dependent data */
ieee80211_setup_sdata(sdata, type);
@@ -2289,3 +2352,96 @@ void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata)
else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
atomic_dec(&sdata->u.vlan.num_mcast_sta);
}
+
+int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata,
+ u16 new_links)
+{
+ u16 old_links = sdata->vif.valid_links;
+ unsigned long add = new_links & ~old_links;
+ unsigned long rem = old_links & ~new_links;
+ unsigned int link_id;
+ int ret;
+ struct {
+ struct ieee80211_link_data data;
+ struct ieee80211_bss_conf conf;
+ } *links[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *link;
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS];
+ bool use_deflink = old_links == 0; /* set for error case */
+
+ sdata_assert_lock(sdata);
+
+ if (old_links == new_links)
+ return 0;
+
+ /* allocate new link structures first */
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ links[link_id] = link;
+ }
+
+ /* keep track of the old pointers for the driver */
+ BUILD_BUG_ON(sizeof(old) != sizeof(sdata->vif.link_conf));
+ memcpy(old, sdata->vif.link_conf, sizeof(old));
+ /* and for us in error cases */
+ BUILD_BUG_ON(sizeof(old_data) != sizeof(sdata->link));
+ memcpy(old_data, sdata->link, sizeof(old_data));
+
+ /* link them into data structures */
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ WARN_ON(!use_deflink &&
+ sdata->link[link_id] == &sdata->deflink);
+
+ link = links[link_id];
+ ieee80211_link_init(sdata, link_id, &link->data, &link->conf);
+ }
+
+ for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ sdata->link[link_id] = NULL;
+ sdata->vif.link_conf[link_id] = NULL;
+ }
+
+ sdata->vif.valid_links = new_links;
+
+ /* tell the driver */
+ ret = drv_change_vif_links(sdata->local, sdata,
+ old_links, new_links,
+ old);
+ if (ret) {
+ /* restore config */
+ memcpy(sdata->link, old_data, sizeof(old_data));
+ memcpy(sdata->vif.link_conf, old, sizeof(old));
+ sdata->vif.valid_links = old_links;
+ /* and free the newly allocated links */
+ goto free;
+ }
+
+ /* use deflink/bss_conf again if and only if there are no more links */
+ use_deflink = new_links == 0;
+
+ /* now use this to free the old links */
+ memset(links, 0, sizeof(links));
+ for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (sdata->link[link_id] == &sdata->deflink)
+ continue;
+ /*
+ * we must have allocated the data through this path so
+ * we know we can free both at the same time
+ */
+ links[link_id] = container_of(sdata->link[link_id],
+ typeof(*links[link_id]),
+ data);
+ }
+
+free:
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
+ kfree(links[link_id]);
+ if (use_deflink)
+ ieee80211_link_init(sdata, -1, &sdata->deflink,
+ &sdata->vif.bss_conf);
+ return ret;
+}
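
ieee80211_vif_set_links() takes the new valid_links bitmap, allocates link data for bits being added, informs the driver via drv_change_vif_links(), and frees the structures for bits being removed, falling back to deflink when the bitmap becomes empty or on error. A hypothetical caller, only to show the intended calling convention; the wrapper, its name and the chosen link IDs are assumptions, while the locking requirement follows from the sdata_assert_lock() in the function above:

/* hypothetical caller, not part of this patch */
static int example_set_two_links(struct ieee80211_sub_if_data *sdata)
{
	int ret;

	sdata_lock(sdata);
	ret = ieee80211_vif_set_links(sdata, BIT(0) | BIT(2));
	if (!ret)
		ret = ieee80211_vif_set_links(sdata, 0);  /* back to non-MLD */
	sdata_unlock(sdata);

	return ret;
}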
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0fcf8aebedc4..6befb578ed9e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -6,7 +6,7 @@
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2020 Intel Corporation
+ * Copyright 2018-2020, 2022 Intel Corporation
*/
#include <linux/if_ether.h>
@@ -351,8 +351,11 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
assert_key_lock(sdata->local);
- if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
+ if (idx >= 0 && idx < NUM_DEFAULT_KEYS) {
key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ if (!key)
+ key = key_mtx_dereference(sdata->local, sdata->deflink.gtk[idx]);
+ }
if (uni) {
rcu_assign_pointer(sdata->default_unicast_key, key);
@@ -362,7 +365,7 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
}
if (multi)
- rcu_assign_pointer(sdata->default_multicast_key, key);
+ rcu_assign_pointer(sdata->deflink.default_multicast_key, key);
ieee80211_debugfs_key_update_default(sdata);
}
@@ -384,9 +387,10 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx)
if (idx >= NUM_DEFAULT_KEYS &&
idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
- key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ key = key_mtx_dereference(sdata->local,
+ sdata->deflink.gtk[idx]);
- rcu_assign_pointer(sdata->default_mgmt_key, key);
+ rcu_assign_pointer(sdata->deflink.default_mgmt_key, key);
ieee80211_debugfs_key_update_default(sdata);
}
@@ -409,9 +413,10 @@ __ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata, int idx)
if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS &&
idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
NUM_DEFAULT_BEACON_KEYS)
- key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ key = key_mtx_dereference(sdata->local,
+ sdata->deflink.gtk[idx]);
- rcu_assign_pointer(sdata->default_beacon_key, key);
+ rcu_assign_pointer(sdata->deflink.default_beacon_key, key);
ieee80211_debugfs_key_update_default(sdata);
}
@@ -433,13 +438,25 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
int idx;
int ret = 0;
bool defunikey, defmultikey, defmgmtkey, defbeaconkey;
+ bool is_wep;
/* caller must provide at least one old/new */
if (WARN_ON(!new && !old))
return 0;
- if (new)
+ if (new) {
+ idx = new->conf.keyidx;
list_add_tail_rcu(&new->list, &sdata->key_list);
+ is_wep = new->conf.cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ new->conf.cipher == WLAN_CIPHER_SUITE_WEP104;
+ } else {
+ idx = old->conf.keyidx;
+ is_wep = old->conf.cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ old->conf.cipher == WLAN_CIPHER_SUITE_WEP104;
+ }
+
+ if ((is_wep || pairwise) && idx >= NUM_DEFAULT_KEYS)
+ return -EINVAL;
WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx);
@@ -451,8 +468,6 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
}
if (old) {
- idx = old->conf.keyidx;
-
if (old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
ieee80211_key_disable_hw_accel(old);
@@ -460,8 +475,6 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
ret = ieee80211_key_enable_hw_accel(new);
}
} else {
- /* new must be provided in case old is not */
- idx = new->conf.keyidx;
if (!new->local->wowlan)
ret = ieee80211_key_enable_hw_accel(new);
}
@@ -490,13 +503,13 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
sdata->default_unicast_key);
defmultikey = old &&
old == key_mtx_dereference(sdata->local,
- sdata->default_multicast_key);
+ sdata->deflink.default_multicast_key);
defmgmtkey = old &&
old == key_mtx_dereference(sdata->local,
- sdata->default_mgmt_key);
+ sdata->deflink.default_mgmt_key);
defbeaconkey = old &&
old == key_mtx_dereference(sdata->local,
- sdata->default_beacon_key);
+ sdata->deflink.default_beacon_key);
if (defunikey && !new)
__ieee80211_set_default_key(sdata, -1, true, false);
@@ -507,7 +520,11 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
if (defbeaconkey && !new)
__ieee80211_set_default_beacon_key(sdata, -1);
- rcu_assign_pointer(sdata->keys[idx], new);
+ if (is_wep || pairwise)
+ rcu_assign_pointer(sdata->keys[idx], new);
+ else
+ rcu_assign_pointer(sdata->deflink.gtk[idx], new);
+
if (defunikey && new)
__ieee80211_set_default_key(sdata, new->conf.keyidx,
true, false);
@@ -531,8 +548,7 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
struct ieee80211_key *
ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
const u8 *key_data,
- size_t seq_len, const u8 *seq,
- const struct ieee80211_cipher_scheme *cs)
+ size_t seq_len, const u8 *seq)
{
struct ieee80211_key *key;
int i, j, err;
@@ -675,21 +691,6 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
return ERR_PTR(err);
}
break;
- default:
- if (cs) {
- if (seq_len && seq_len != cs->pn_len) {
- kfree(key);
- return ERR_PTR(-EINVAL);
- }
-
- key->conf.iv_len = cs->hdr_len;
- key->conf.icv_len = cs->mic_len;
- for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
- for (j = 0; j < seq_len; j++)
- key->u.gen.rx_pn[i][j] =
- seq[seq_len - j - 1];
- key->flags |= KEY_FLAG_CIPHER_SCHEME;
- }
}
memcpy(key->conf.key, key_data, key_len);
INIT_LIST_HEAD(&key->list);
@@ -800,7 +801,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct sta_info *sta)
{
static atomic_t key_color = ATOMIC_INIT(0);
- struct ieee80211_key *old_key;
+ struct ieee80211_key *old_key = NULL;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
/*
@@ -829,7 +830,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
old_key = key_mtx_dereference(sdata->local,
sta->deflink.gtk[idx]);
} else {
- old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ if (idx < NUM_DEFAULT_KEYS)
+ old_key = key_mtx_dereference(sdata->local,
+ sdata->keys[idx]);
+ if (!old_key)
+ old_key = key_mtx_dereference(sdata->local,
+ sdata->deflink.gtk[idx]);
}
/* Non-pairwise keys must also not switch the cipher on rekey */
@@ -1294,7 +1300,7 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx,
keyconf->keylen, keyconf->key,
- 0, NULL, NULL);
+ 0, NULL);
if (IS_ERR(key))
return ERR_CAST(key);
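The key.c hunks above split key storage into two tables: WEP and pairwise keys keep using sdata->keys[] and must have an index below NUM_DEFAULT_KEYS, while other group keys move into the per-link deflink.gtk[] array, and lookups now try the interface table first and fall back to the link slot. The sketch below mirrors that selection and lookup order with simplified, hypothetical types (struct iface, struct key); it is an illustration, not the mac80211 data structures.

/* Sketch only (hypothetical types): per-interface slots for WEP/pairwise
 * keys, per-link slots for other group keys, same lookup order as above. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define NUM_DEFAULT_KEYS 4
#define NUM_TOTAL_KEYS   8

struct key { int idx; bool pairwise; bool wep; };

struct iface {
	struct key *keys[NUM_DEFAULT_KEYS];	/* WEP + pairwise (idx < 4) */
	struct key *link_gtk[NUM_TOTAL_KEYS];	/* per-link group keys */
};

/* Store: WEP or pairwise keys must use a default-key index. */
static int key_store(struct iface *inf, struct key *new)
{
	if ((new->wep || new->pairwise) && new->idx >= NUM_DEFAULT_KEYS)
		return -1;			/* -EINVAL in the real code */
	if (new->wep || new->pairwise)
		inf->keys[new->idx] = new;
	else
		inf->link_gtk[new->idx] = new;
	return 0;
}

/* Lookup: try the interface table first, then the link GTK slot. */
static struct key *key_lookup(struct iface *inf, int idx)
{
	struct key *old = NULL;

	if (idx < NUM_DEFAULT_KEYS)
		old = inf->keys[idx];
	if (!old)
		old = inf->link_gtk[idx];
	return old;
}

int main(void)
{
	struct iface inf = { { NULL } };
	struct key wep = { .idx = 1, .wep = true };
	struct key gtk = { .idx = 5 };

	key_store(&inf, &wep);
	key_store(&inf, &gtk);
	printf("idx 1 -> %p, idx 5 -> %p\n",
	       (void *)key_lookup(&inf, 1), (void *)key_lookup(&inf, 5));
	return 0;
}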
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 1e326c89d721..e994dcea1ce3 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -2,7 +2,7 @@
/*
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2022 Intel Corporation
*/
#ifndef IEEE80211_KEY_H
@@ -30,12 +30,10 @@ struct sta_info;
* @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
* in the hardware for TX crypto hardware acceleration.
* @KEY_FLAG_TAINTED: Key is tainted and packets should be dropped.
- * @KEY_FLAG_CIPHER_SCHEME: This key is for a hardware cipher scheme
*/
enum ieee80211_internal_key_flags {
KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
KEY_FLAG_TAINTED = BIT(1),
- KEY_FLAG_CIPHER_SCHEME = BIT(2),
};
enum ieee80211_internal_tkip_state {
@@ -140,8 +138,7 @@ struct ieee80211_key {
struct ieee80211_key *
ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
const u8 *key_data,
- size_t seq_len, const u8 *seq,
- const struct ieee80211_cipher_scheme *cs);
+ size_t seq_len, const u8 *seq);
/*
* Insert a key into data structures (sdata, sta if necessary)
* to make it used, free old key. On failure, also free the new key.
@@ -166,6 +163,8 @@ void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata);
#define key_mtx_dereference(local, ref) \
rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
+#define rcu_dereference_check_key_mtx(local, ref) \
+ rcu_dereference_check(ref, lockdep_is_held(&((local)->key_mtx)))
void ieee80211_delayed_tailroom_dec(struct work_struct *wk);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 5a385d4146b9..c34f06039dda 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <net/mac80211.h>
@@ -147,7 +147,7 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!rcu_access_pointer(sdata->vif.chanctx_conf))
+ if (!rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf))
continue;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
@@ -175,7 +175,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
changed |= ieee80211_hw_conf_chan(local);
else
changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
- IEEE80211_CONF_CHANGE_POWER);
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS);
if (changed && local->open_count) {
ret = drv_config(local, changed);
@@ -199,15 +200,91 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
return ret;
}
+#define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\
+ BSS_CHANGED_IDLE |\
+ BSS_CHANGED_IBSS |\
+ BSS_CHANGED_ARP_FILTER |\
+ BSS_CHANGED_SSID)
+
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
- u32 changed)
+ u64 changed)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ might_sleep();
+
+ if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ return;
+
+ if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED) &&
+ sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+ sdata->vif.type != NL80211_IFTYPE_OCB))
+ return;
+
+ if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
+ sdata->vif.type == NL80211_IFTYPE_NAN ||
+ (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !sdata->vif.bss_conf.mu_mimo_owner &&
+ !(changed & BSS_CHANGED_TXPOWER))))
+ return;
+
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ if (changed & BSS_CHANGED_VIF_CFG_FLAGS) {
+ u64 ch = changed & BSS_CHANGED_VIF_CFG_FLAGS;
+
+ trace_drv_vif_cfg_changed(local, sdata, changed);
+ if (local->ops->vif_cfg_changed)
+ local->ops->vif_cfg_changed(&local->hw, &sdata->vif, ch);
+ }
+
+ if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) {
+ u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS;
+
+ /* FIXME: should be for each link */
+ trace_drv_link_info_changed(local, sdata, 0, changed);
+ if (local->ops->link_info_changed)
+ local->ops->link_info_changed(&local->hw, &sdata->vif,
+ 0, ch);
+ }
+
+ if (local->ops->bss_info_changed)
+ local->ops->bss_info_changed(&local->hw, &sdata->vif,
+ &sdata->vif.bss_conf, changed);
+ trace_drv_return_void(local);
+}
+
+void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata,
+ u64 changed)
{
struct ieee80211_local *local = sdata->local;
+ WARN_ON_ONCE(changed & ~BSS_CHANGED_VIF_CFG_FLAGS);
+
+ if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ return;
+
+ drv_vif_cfg_changed(local, sdata, changed);
+}
+
+void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ int link_id, u64 changed)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS);
+
if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
return;
- drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ drv_link_info_changed(local, sdata, link_id, changed);
}
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -284,7 +361,7 @@ static void ieee80211_restart_work(struct work_struct *work)
* Then we can have a race...
*/
cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
- if (sdata->vif.csa_active) {
+ if (sdata->vif.bss_conf.csa_active) {
sdata_lock(sdata);
ieee80211_sta_connection_lost(sdata,
WLAN_REASON_UNSPECIFIED,
@@ -349,7 +426,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct in_device *idev;
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_bss_conf *bss_conf;
+ struct ieee80211_vif_cfg *vif_cfg;
struct ieee80211_if_managed *ifmgd;
int c = 0;
@@ -361,7 +438,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
return NOTIFY_DONE;
sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
- bss_conf = &sdata->vif.bss_conf;
+ vif_cfg = &sdata->vif.cfg;
/* ARP filtering is only supported in managed mode */
if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -374,21 +451,20 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
ifmgd = &sdata->u.mgd;
sdata_lock(sdata);
- /* Copy the addresses to the bss_conf list */
+ /* Copy the addresses to the vif config list */
ifa = rtnl_dereference(idev->ifa_list);
while (ifa) {
if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN)
- bss_conf->arp_addr_list[c] = ifa->ifa_address;
+ vif_cfg->arp_addr_list[c] = ifa->ifa_address;
ifa = rtnl_dereference(ifa->ifa_next);
c++;
}
- bss_conf->arp_addr_cnt = c;
+ vif_cfg->arp_addr_cnt = c;
/* Configure driver only if associated (which also implies it is up) */
if (ifmgd->associated)
- ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_ARP_FILTER);
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER);
sdata_unlock(sdata);
@@ -557,6 +633,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
return NULL;
+ if (WARN_ON(!!ops->link_info_changed != !!ops->vif_cfg_changed ||
+ (ops->link_info_changed && ops->bss_info_changed)))
+ return NULL;
+
/* check all or no channel context operations exist */
i = !!ops->add_chanctx + !!ops->remove_chanctx +
!!ops->change_chanctx + !!ops->assign_vif_chanctx +
@@ -707,14 +787,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
spin_lock_init(&local->queue_stop_reason_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- struct airtime_sched_info *air_sched = &local->airtime[i];
-
- air_sched->active_txqs = RB_ROOT_CACHED;
- INIT_LIST_HEAD(&air_sched->active_list);
- spin_lock_init(&air_sched->lock);
- air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
- air_sched->aql_txq_limit_high =
+ INIT_LIST_HEAD(&local->active_txqs[i]);
+ spin_lock_init(&local->active_txq_lock[i]);
+ local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
+ local->aql_txq_limit_high[i] =
IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
+ atomic_set(&local->aql_ac_pending_airtime[i], 0);
}
local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX;
@@ -778,7 +856,7 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
{
bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */
bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE);
- int n_suites = 0, r = 0, w = 0;
+ int r = 0, w = 0;
u32 *suites;
static const u32 cipher_suites[] = {
/* keep WEP first, it may be removed below */
@@ -824,10 +902,9 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
continue;
suites[w++] = suite;
}
- } else if (!local->hw.cipher_schemes) {
- /* If the driver doesn't have cipher schemes, there's nothing
- * else to do other than assign the (software supported and
- * perhaps offloaded) cipher suites.
+ } else {
+ /* assign the (software supported and perhaps offloaded)
+ * cipher suites
*/
local->hw.wiphy->cipher_suites = cipher_suites;
local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -842,58 +919,6 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
/* not dynamically allocated, so just return */
return 0;
- } else {
- const struct ieee80211_cipher_scheme *cs;
-
- cs = local->hw.cipher_schemes;
-
- /* Driver specifies cipher schemes only (but not cipher suites
- * including the schemes)
- *
- * We start counting ciphers defined by schemes, TKIP, CCMP,
- * CCMP-256, GCMP, and GCMP-256
- */
- n_suites = local->hw.n_cipher_schemes + 5;
-
- /* check if we have WEP40 and WEP104 */
- if (have_wep)
- n_suites += 2;
-
- /* check if we have AES_CMAC, BIP-CMAC-256, BIP-GMAC-128,
- * BIP-GMAC-256
- */
- if (have_mfp)
- n_suites += 4;
-
- suites = kmalloc_array(n_suites, sizeof(u32), GFP_KERNEL);
- if (!suites)
- return -ENOMEM;
-
- suites[w++] = WLAN_CIPHER_SUITE_CCMP;
- suites[w++] = WLAN_CIPHER_SUITE_CCMP_256;
- suites[w++] = WLAN_CIPHER_SUITE_TKIP;
- suites[w++] = WLAN_CIPHER_SUITE_GCMP;
- suites[w++] = WLAN_CIPHER_SUITE_GCMP_256;
-
- if (have_wep) {
- suites[w++] = WLAN_CIPHER_SUITE_WEP40;
- suites[w++] = WLAN_CIPHER_SUITE_WEP104;
- }
-
- if (have_mfp) {
- suites[w++] = WLAN_CIPHER_SUITE_AES_CMAC;
- suites[w++] = WLAN_CIPHER_SUITE_BIP_CMAC_256;
- suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_128;
- suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
- }
-
- for (r = 0; r < local->hw.n_cipher_schemes; r++) {
- suites[w++] = cs[r].cipher;
- if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) {
- kfree(suites);
- return -EINVAL;
- }
- }
}
local->hw.wiphy->cipher_suites = suites;
@@ -932,6 +957,48 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
(!local->ops->start_nan || !local->ops->stop_nan)))
return -EINVAL;
+ if (hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) {
+ /*
+ * For drivers capable of doing MLO, assume modern driver
+ * or firmware facilities, so software doesn't have to do
+ * as much, e.g. monitoring beacons would be hard if we
+ * might not even know which link is active at which time.
+ */
+ if (WARN_ON(!local->use_chanctx))
+ return -EINVAL;
+
+ if (WARN_ON(!local->ops->link_info_changed))
+ return -EINVAL;
+
+ if (WARN_ON(!ieee80211_hw_check(hw, HAS_RATE_CONTROL)))
+ return -EINVAL;
+
+ if (WARN_ON(!ieee80211_hw_check(hw, AMPDU_AGGREGATION)))
+ return -EINVAL;
+
+ if (WARN_ON(ieee80211_hw_check(hw, HOST_BROADCAST_PS_BUFFERING)))
+ return -EINVAL;
+
+ if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_PS) &&
+ !ieee80211_hw_check(hw, SUPPORTS_DYNAMIC_PS)))
+ return -EINVAL;
+
+ if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE)))
+ return -EINVAL;
+
+ if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR)))
+ return -EINVAL;
+
+ if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC)))
+ return -EINVAL;
+
+ if (WARN_ON(ieee80211_hw_check(hw, TIMING_BEACON_ONLY)))
+ return -EINVAL;
+
+ if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS)))
+ return -EINVAL;
+ }
+
#ifdef CONFIG_PM
if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume))
return -EINVAL;
@@ -1168,12 +1235,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->hw.wiphy->max_scan_ie_len)
local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
- if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
- local->hw.n_cipher_schemes))) {
- result = -EINVAL;
- goto fail_workqueue;
- }
-
result = ieee80211_init_cipher_suites(local);
if (result < 0)
goto fail_workqueue;
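In the main.c changes above, ieee80211_bss_info_change_notify() splits a u64 changed bitmap: bits in BSS_CHANGED_VIF_CFG_FLAGS go to the driver's vif_cfg_changed operation, the remaining per-link bits go to link_info_changed, and legacy drivers still receive the whole mask through bss_info_changed. A small sketch of that dispatch, assuming hypothetical flag names and callbacks, is shown below.

/* Sketch (hypothetical flags and callbacks): split a u64 "changed" bitmap
 * into interface-wide vif-config bits and per-link bits and dispatch each
 * part separately, as the new notify path above does for modern drivers. */
#include <stdio.h>
#include <stdint.h>

#define CHANGED_ASSOC      (1ULL << 0)	/* vif-config style bits */
#define CHANGED_ARP_FILTER (1ULL << 1)
#define CHANGED_BEACON     (1ULL << 2)	/* per-link style bits */
#define CHANGED_TXPOWER    (1ULL << 3)

#define VIF_CFG_FLAGS (CHANGED_ASSOC | CHANGED_ARP_FILTER)

static void vif_cfg_changed(uint64_t ch)
{
	printf("vif cfg changed: 0x%llx\n", (unsigned long long)ch);
}

static void link_info_changed(int link_id, uint64_t ch)
{
	printf("link %d info changed: 0x%llx\n", link_id,
	       (unsigned long long)ch);
}

static void notify(uint64_t changed)
{
	if (changed & VIF_CFG_FLAGS)
		vif_cfg_changed(changed & VIF_CFG_FLAGS);
	if (changed & ~VIF_CFG_FLAGS)
		link_info_changed(0, changed & ~VIF_CFG_FLAGS);
}

int main(void)
{
	notify(CHANGED_ASSOC | CHANGED_BEACON | CHANGED_TXPOWER);
	return 0;
}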
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 5275f4f32a78..13722a7f2254 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -399,7 +399,7 @@ static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return -EINVAL;
@@ -455,7 +455,7 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
u8 *pos;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return -EINVAL;
@@ -527,7 +527,7 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
u8 *pos;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return -EINVAL;
@@ -820,7 +820,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
band = chanctx_conf->def.chan->band;
rcu_read_unlock();
@@ -1057,7 +1057,7 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
}
ieee80211_recalc_dtim(local, sdata);
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
netif_carrier_on(sdata->dev);
return 0;
@@ -1081,7 +1081,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
sdata->vif.bss_conf.enable_beacon = false;
sdata->beacon_rate_set = false;
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_BEACON_ENABLED);
/* remove beacon */
bcn = rcu_dereference_protected(ifmsh->beacon,
@@ -1357,7 +1357,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
- !sdata->vif.csa_active)
+ !sdata->vif.bss_conf.csa_active)
ieee80211_mesh_process_chnswitch(sdata, elems, true);
}
@@ -1488,7 +1488,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
ifmsh->pre_value = pre_value;
- if (!sdata->vif.csa_active &&
+ if (!sdata->vif.bss_conf.csa_active &&
!ieee80211_mesh_process_chnswitch(sdata, elems, false)) {
mcsa_dbg(sdata, "Failed to process CSA action frame");
goto free;
@@ -1581,7 +1581,7 @@ static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata)
if (ieee80211_mesh_rebuild_beacon(sdata))
return;
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
}
void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 58ebdcd69d05..45e7c1b307bc 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021 Intel Corporation
+ * Copyright (C) 2019, 2021-2022 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*/
@@ -247,13 +247,13 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
return -EAGAIN;
skb = dev_alloc_skb(local->tx_headroom +
- sdata->encrypt_headroom +
+ IEEE80211_ENCRYPT_HEADROOM +
IEEE80211_ENCRYPT_TAILROOM +
hdr_len +
2 + 15 /* PERR IE */);
if (!skb)
return -1;
- skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
+ skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 42ba7424589e..d67011745048 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021 Intel Corporation
+ * Copyright (C) 2019, 2021-2022 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*/
#include <linux/gfp.h>
@@ -438,16 +438,18 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
sta->sta.deflink.supp_rates[sband->band] = rates;
if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
- elems->ht_cap_elem, sta))
+ elems->ht_cap_elem,
+ &sta->deflink))
changed |= IEEE80211_RC_BW_CHANGED;
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- elems->vht_cap_elem, sta);
+ elems->vht_cap_elem,
+ &sta->deflink);
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
- sta);
+ &sta->deflink);
if (bw != sta->sta.deflink.bandwidth)
changed |= IEEE80211_RC_BW_CHANGED;
@@ -464,7 +466,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
rate_control_rate_init(sta);
else
- rate_control_rate_update(local, sband, sta, changed);
+ rate_control_rate_update(local, sband, sta, 0, changed);
out:
spin_unlock_bh(&sta->mesh->plink_lock);
}
@@ -475,8 +477,7 @@ static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
unsigned long *aid_map;
int aid;
- aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1),
- sizeof(*aid_map), GFP_KERNEL);
+ aid_map = bitmap_zalloc(IEEE80211_MAX_AID + 1, GFP_KERNEL);
if (!aid_map)
return -ENOMEM;
@@ -489,7 +490,7 @@ static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
rcu_read_unlock();
aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
- kfree(aid_map);
+ bitmap_free(aid_map);
if (aid > IEEE80211_MAX_AID)
return -ENOBUFS;
@@ -510,7 +511,7 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
if (aid < 0)
return NULL;
- sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
+ sta = sta_info_alloc(sdata, hw_addr, -1, GFP_KERNEL);
if (!sta)
return NULL;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 58d48dcae030..01a72d1fcfcc 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -8,7 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
#include <linux/delay.h>
@@ -459,7 +459,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"AP %pM changed bandwidth, new config is %d.%03d MHz, "
"width %d (%d.%03d/%d MHz)\n",
- ifmgd->bssid, chandef.chan->center_freq,
+ sdata->deflink.u.mgd.bssid, chandef.chan->center_freq,
chandef.chan->freq_offset, chandef.width,
chandef.center_freq1, chandef.freq1_offset,
chandef.center_freq2);
@@ -475,16 +475,16 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
!cfg80211_chandef_valid(&chandef)) {
sdata_info(sdata,
"AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
- ifmgd->bssid, flags, ifmgd->flags);
+ sdata->deflink.u.mgd.bssid, flags, ifmgd->flags);
return -EINVAL;
}
- ret = ieee80211_vif_change_bandwidth(sdata, &chandef, changed);
+ ret = ieee80211_link_change_bandwidth(&sdata->deflink, &chandef, changed);
if (ret) {
sdata_info(sdata,
"AP %pM changed bandwidth to incompatible one - disconnect\n",
- ifmgd->bssid);
+ sdata->deflink.u.mgd.bssid);
return ret;
}
@@ -624,7 +624,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sub_if_data *other;
list_for_each_entry_rcu(other, &local->interfaces, list) {
- if (other->vif.mu_mimo_owner) {
+ if (other->vif.bss_conf.mu_mimo_owner) {
disable_mu_mimo = true;
break;
}
@@ -632,7 +632,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
if (disable_mu_mimo)
cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
else
- sdata->vif.mu_mimo_owner = true;
+ sdata->vif.bss_conf.mu_mimo_owner = true;
}
mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
@@ -664,7 +664,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
bool reg_cap = false;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!WARN_ON_ONCE(!chanctx_conf))
reg_cap = cfg80211_chandef_usable(sdata->wdev.wiphy,
&chanctx_conf->def,
@@ -705,7 +705,7 @@ static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata,
bool reg_cap = false;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!WARN_ON_ONCE(!chanctx_conf))
reg_cap = cfg80211_chandef_usable(sdata->wdev.wiphy,
&chanctx_conf->def,
@@ -766,7 +766,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
sdata_assert_lock(sdata);
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return -EINVAL;
@@ -997,7 +997,7 @@ skip_rates:
if (sband->band != NL80211_BAND_6GHZ &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
- sband, chan, sdata->smps_mode);
+ sband, chan, sdata->deflink.smps_mode);
/* if present, add any custom IEs that go before VHT */
if (assoc_data->ie_len) {
@@ -1201,9 +1201,9 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
nullfunc->frame_control = fc;
- memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(nullfunc->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
- memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
@@ -1229,7 +1229,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
if (!ifmgd->associated)
goto out;
- if (!sdata->vif.csa_active)
+ if (!sdata->vif.bss_conf.csa_active)
goto out;
/*
@@ -1239,16 +1239,16 @@ static void ieee80211_chswitch_work(struct work_struct *work)
* completed successfully
*/
- if (sdata->reserved_chanctx) {
+ if (sdata->deflink.reserved_chanctx) {
/*
* with multi-vif csa driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
* reservations
*/
- if (sdata->reserved_ready)
+ if (sdata->deflink.reserved_ready)
goto out;
- ret = ieee80211_vif_use_reserved_context(sdata);
+ ret = ieee80211_link_use_reserved_context(&sdata->deflink);
if (ret) {
sdata_info(sdata,
"failed to use reserved channel context, disconnecting (err=%d)\n",
@@ -1262,7 +1262,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
}
if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef,
- &sdata->csa_chandef)) {
+ &sdata->deflink.csa_chandef)) {
sdata_info(sdata,
"failed to finalize channel switch, disconnecting\n");
ieee80211_queue_work(&sdata->local->hw,
@@ -1270,7 +1270,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
goto out;
}
- ifmgd->csa_waiting_bcn = true;
+ sdata->deflink.u.mgd.csa_waiting_bcn = true;
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
@@ -1289,21 +1289,21 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
sdata_assert_lock(sdata);
- WARN_ON(!sdata->vif.csa_active);
+ WARN_ON(!sdata->vif.bss_conf.csa_active);
- if (sdata->csa_block_tx) {
+ if (sdata->deflink.csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
+ sdata->deflink.csa_block_tx = false;
}
- sdata->vif.csa_active = false;
- ifmgd->csa_waiting_bcn = false;
+ sdata->vif.bss_conf.csa_active = false;
+ sdata->deflink.u.mgd.csa_waiting_bcn = false;
/*
* If the CSA IE is still present on the beacon after the switch,
* we need to consider it as a new CSA (possibly to self).
*/
- ifmgd->beacon_crc_valid = false;
+ sdata->deflink.u.mgd.beacon_crc_valid = false;
ret = drv_post_channel_switch(sdata);
if (ret) {
@@ -1314,7 +1314,8 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
return;
}
- cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef);
+ cfg80211_ch_switch_notify(sdata->dev, &sdata->deflink.reserved_chandef,
+ 0);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
@@ -1353,15 +1354,15 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_sub_if_data *sdata)
mutex_lock(&local->mtx);
mutex_lock(&local->chanctx_mtx);
- ieee80211_vif_unreserve_chanctx(sdata);
+ ieee80211_link_unreserve_chanctx(&sdata->deflink);
mutex_unlock(&local->chanctx_mtx);
- if (sdata->csa_block_tx)
+ if (sdata->deflink.csa_block_tx)
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
- sdata->vif.csa_active = false;
+ sdata->deflink.csa_block_tx = false;
+ sdata->vif.bss_conf.csa_active = false;
mutex_unlock(&local->mtx);
@@ -1376,7 +1377,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct cfg80211_bss *cbss = ifmgd->assoc_bss;
+ struct cfg80211_bss *cbss = sdata->deflink.u.mgd.bss;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
enum nl80211_band current_band;
@@ -1398,7 +1399,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
bss->vht_cap_info,
ifmgd->flags,
- ifmgd->bssid, &csa_ie);
+ sdata->deflink.u.mgd.bssid, &csa_ie);
if (!res) {
ch_switch.timestamp = timestamp;
@@ -1412,13 +1413,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (res < 0)
goto lock_and_drop_connection;
- if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
+ if (beacon && sdata->vif.bss_conf.csa_active &&
+ !sdata->deflink.u.mgd.csa_waiting_bcn) {
if (res)
ieee80211_sta_abort_chanswitch(sdata);
else
drv_channel_switch_rx_beacon(sdata, &ch_switch);
return;
- } else if (sdata->vif.csa_active || res) {
+ } else if (sdata->vif.bss_conf.csa_active || res) {
/* disregard subsequent announcements if already processing */
return;
}
@@ -1427,7 +1429,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
csa_ie.chandef.chan->band) {
sdata_info(sdata,
"AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
- ifmgd->bssid,
+ sdata->deflink.u.mgd.bssid,
csa_ie.chandef.chan->center_freq,
csa_ie.chandef.width, csa_ie.chandef.center_freq1,
csa_ie.chandef.center_freq2);
@@ -1440,7 +1442,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
"AP %pM switches to unsupported channel "
"(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
"disconnecting\n",
- ifmgd->bssid,
+ sdata->deflink.u.mgd.bssid,
csa_ie.chandef.chan->center_freq,
csa_ie.chandef.chan->freq_offset,
csa_ie.chandef.width, csa_ie.chandef.center_freq1,
@@ -1452,12 +1454,12 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (cfg80211_chandef_identical(&csa_ie.chandef,
&sdata->vif.bss_conf.chandef) &&
(!csa_ie.mode || !beacon)) {
- if (ifmgd->csa_ignored_same_chan)
+ if (sdata->deflink.u.mgd.csa_ignored_same_chan)
return;
sdata_info(sdata,
"AP %pM tries to chanswitch to same channel, ignore\n",
- ifmgd->bssid);
- ifmgd->csa_ignored_same_chan = true;
+ sdata->deflink.u.mgd.bssid);
+ sdata->deflink.u.mgd.csa_ignored_same_chan = true;
return;
}
@@ -1471,7 +1473,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->mtx);
mutex_lock(&local->chanctx_mtx);
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (!conf) {
sdata_info(sdata,
@@ -1494,8 +1496,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
goto drop_connection;
}
- res = ieee80211_vif_reserve_chanctx(sdata, &csa_ie.chandef,
- chanctx->mode, false);
+ res = ieee80211_link_reserve_chanctx(&sdata->deflink, &csa_ie.chandef,
+ chanctx->mode, false);
if (res) {
sdata_info(sdata,
"failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
@@ -1504,13 +1506,13 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
}
mutex_unlock(&local->chanctx_mtx);
- sdata->vif.csa_active = true;
- sdata->csa_chandef = csa_ie.chandef;
- sdata->csa_block_tx = csa_ie.mode;
- ifmgd->csa_ignored_same_chan = false;
- ifmgd->beacon_crc_valid = false;
+ sdata->vif.bss_conf.csa_active = true;
+ sdata->deflink.csa_chandef = csa_ie.chandef;
+ sdata->deflink.csa_block_tx = csa_ie.mode;
+ sdata->deflink.u.mgd.csa_ignored_same_chan = false;
+ sdata->deflink.u.mgd.beacon_crc_valid = false;
- if (sdata->csa_block_tx)
+ if (sdata->deflink.csa_block_tx)
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
mutex_unlock(&local->mtx);
@@ -1543,8 +1545,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
* send a deauthentication frame. Those two fields will be
* reset when the disconnection worker runs.
*/
- sdata->vif.csa_active = true;
- sdata->csa_block_tx = csa_ie.mode;
+ sdata->vif.bss_conf.csa_active = true;
+ sdata->deflink.csa_block_tx = csa_ie.mode;
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
@@ -1679,25 +1681,25 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
(!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) {
new_ap_level = pwr_level_80211h;
- if (sdata->ap_power_level == new_ap_level)
+ if (sdata->deflink.ap_power_level == new_ap_level)
return 0;
sdata_dbg(sdata,
"Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
- sdata->u.mgd.bssid);
+ sdata->deflink.u.mgd.bssid);
} else { /* has_cisco_pwr is always true here. */
new_ap_level = pwr_level_cisco;
- if (sdata->ap_power_level == new_ap_level)
+ if (sdata->deflink.ap_power_level == new_ap_level)
return 0;
sdata_dbg(sdata,
"Limiting TX power to %d dBm as advertised by %pM\n",
- pwr_level_cisco, sdata->u.mgd.bssid);
+ pwr_level_cisco, sdata->deflink.u.mgd.bssid);
}
- sdata->ap_power_level = new_ap_level;
+ sdata->deflink.ap_power_level = new_ap_level;
if (__ieee80211_recalc_txpower(sdata))
return BSS_CHANGED_TXPOWER;
return 0;
@@ -1765,11 +1767,11 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
if (mgd->flags & IEEE80211_STA_CONNECTION_POLL)
return false;
- if (!mgd->have_beacon)
+ if (!sdata->deflink.u.mgd.have_beacon)
return false;
rcu_read_lock();
- sta = sta_info_get(sdata, mgd->bssid);
+ sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
if (sta)
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
rcu_read_unlock();
@@ -1807,7 +1809,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local)
}
if (count == 1 && ieee80211_powersave_allowed(found)) {
- u8 dtimper = found->u.mgd.dtim_period;
+ u8 dtimper = found->deflink.u.mgd.dtim_period;
timeout = local->dynamic_ps_forced_timeout;
if (timeout < 0)
@@ -1833,7 +1835,7 @@ void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
if (sdata->vif.bss_conf.ps != ps_allowed) {
sdata->vif.bss_conf.ps = ps_allowed;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_PS);
}
}
@@ -1935,12 +1937,12 @@ void ieee80211_dfs_cac_timer_work(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct ieee80211_sub_if_data *sdata =
container_of(delayed_work, struct ieee80211_sub_if_data,
- dfs_cac_timer_work);
+ deflink.dfs_cac_timer_work);
struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chandef;
mutex_lock(&sdata->local->mtx);
if (sdata->wdev.cac_started) {
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_FINISHED,
GFP_KERNEL);
@@ -2029,7 +2031,7 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
{
if (__ieee80211_sta_handle_tspec_ac_params(sdata))
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_QOS);
}
static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work)
@@ -2076,11 +2078,11 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
* the driver about it.
*/
mu_edca_count = mu_edca ? mu_edca->mu_qos_info & 0x0f : -1;
- if (count == ifmgd->wmm_last_param_set &&
- mu_edca_count == ifmgd->mu_edca_last_param_set)
+ if (count == sdata->deflink.u.mgd.wmm_last_param_set &&
+ mu_edca_count == sdata->deflink.u.mgd.mu_edca_last_param_set)
return false;
- ifmgd->wmm_last_param_set = count;
- ifmgd->mu_edca_last_param_set = mu_edca_count;
+ sdata->deflink.u.mgd.wmm_last_param_set = count;
+ sdata->deflink.u.mgd.mu_edca_last_param_set = mu_edca_count;
pos = wmm_param + 8;
left = wmm_param_len - 8;
@@ -2258,6 +2260,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss = (void *)cbss->priv;
struct ieee80211_local *local = sdata->local;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
bss_info_changed |= BSS_CHANGED_ASSOC;
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
@@ -2267,8 +2270,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
beacon_loss_count * bss_conf->beacon_int));
sdata->u.mgd.associated = true;
- sdata->u.mgd.assoc_bss = cbss;
- memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
+ sdata->deflink.u.mgd.bss = cbss;
+ memcpy(sdata->deflink.u.mgd.bssid, cbss->bssid, ETH_ALEN);
ieee80211_check_rate_mask(sdata);
@@ -2289,7 +2292,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
(u8 *) &bss_conf->p2p_noa_attr,
sizeof(bss_conf->p2p_noa_attr));
if (ret >= 2) {
- sdata->u.mgd.p2p_noa_index =
+ sdata->deflink.u.mgd.p2p_noa_index =
bss_conf->p2p_noa_attr.index;
bss_info_changed |= BSS_CHANGED_P2P_PS;
}
@@ -2302,14 +2305,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_led_assoc(local, 1);
- if (sdata->u.mgd.have_beacon) {
+ if (sdata->deflink.u.mgd.have_beacon) {
/*
* If the AP is buggy we may get here with no DTIM period
* known, so assume it's 1 which is the only safe assumption
* in that case, although if the TIM IE is broken powersave
* probably just won't work at all.
*/
- bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
+ bss_conf->dtim_period = sdata->deflink.u.mgd.dtim_period ?: 1;
bss_conf->beacon_rate = bss->beacon_rate;
bss_info_changed |= BSS_CHANGED_BEACON_INFO;
} else {
@@ -2317,7 +2320,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
bss_conf->dtim_period = 0;
}
- bss_conf->assoc = 1;
+ vif_cfg->assoc = 1;
/* Tell the driver to monitor connection quality (if supported) */
if (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI &&
@@ -2325,7 +2328,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
bss_info_changed |= BSS_CHANGED_CQM;
/* Enable ARP filtering */
- if (bss_conf->arp_addr_cnt)
+ if (vif_cfg->arp_addr_cnt)
bss_info_changed |= BSS_CHANGED_ARP_FILTER;
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
@@ -2334,7 +2337,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
- ieee80211_recalc_smps(sdata);
+ ieee80211_recalc_smps(sdata, 0);
ieee80211_recalc_ps_vif(sdata);
netif_carrier_on(sdata->dev);
@@ -2363,7 +2366,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_stop_poll(sdata);
ifmgd->associated = false;
- ifmgd->assoc_bss = NULL;
+ sdata->deflink.u.mgd.bss = NULL;
netif_carrier_off(sdata->dev);
/*
@@ -2401,12 +2404,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
* driver requested so.
*/
if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) &&
- !ifmgd->have_beacon) {
+ !sdata->deflink.u.mgd.have_beacon) {
drv_mgd_prepare_tx(sdata->local, sdata, &info);
}
- ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid,
- ifmgd->bssid, stype, reason,
+ ieee80211_send_deauth_disassoc(sdata, sdata->deflink.u.mgd.bssid,
+ sdata->deflink.u.mgd.bssid, stype, reason,
tx, frame_buf);
}
@@ -2417,9 +2420,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
drv_mgd_complete_tx(sdata->local, sdata, &info);
/* clear bssid only after building the needed mgmt frames */
- eth_zero_addr(ifmgd->bssid);
+ eth_zero_addr(sdata->deflink.u.mgd.bssid);
- sdata->vif.bss_conf.ssid_len = 0;
+ sdata->vif.cfg.ssid_len = 0;
/* remove AP and TDLS peers */
sta_info_flush(sdata);
@@ -2429,9 +2432,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_led_assoc(local, 0);
changed |= BSS_CHANGED_ASSOC;
- sdata->vif.bss_conf.assoc = false;
+ sdata->vif.cfg.assoc = false;
- ifmgd->p2p_noa_index = -1;
+ sdata->deflink.u.mgd.p2p_noa_index = -1;
memset(&sdata->vif.bss_conf.p2p_noa_attr, 0,
sizeof(sdata->vif.bss_conf.p2p_noa_attr));
@@ -2447,15 +2450,15 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
memset(sdata->vif.bss_conf.mu_group.position, 0,
sizeof(sdata->vif.bss_conf.mu_group.position));
changed |= BSS_CHANGED_MU_GROUPS;
- sdata->vif.mu_mimo_owner = false;
+ sdata->vif.bss_conf.mu_mimo_owner = false;
- sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
+ sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
del_timer_sync(&local->dynamic_ps_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
/* Disable ARP filtering */
- if (sdata->vif.bss_conf.arp_addr_cnt)
+ if (sdata->vif.cfg.arp_addr_cnt)
changed |= BSS_CHANGED_ARP_FILTER;
sdata->vif.bss_conf.qos = false;
@@ -2476,19 +2479,19 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.dtim_period = 0;
sdata->vif.bss_conf.beacon_rate = NULL;
- ifmgd->have_beacon = false;
+ sdata->deflink.u.mgd.have_beacon = false;
ifmgd->flags = 0;
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(&sdata->deflink);
- sdata->vif.csa_active = false;
- ifmgd->csa_waiting_bcn = false;
- ifmgd->csa_ignored_same_chan = false;
- if (sdata->csa_block_tx) {
+ sdata->vif.bss_conf.csa_active = false;
+ sdata->deflink.u.mgd.csa_waiting_bcn = false;
+ sdata->deflink.u.mgd.csa_ignored_same_chan = false;
+ if (sdata->deflink.csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
+ sdata->deflink.csa_block_tx = false;
}
mutex_unlock(&local->mtx);
@@ -2496,8 +2499,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
cancel_delayed_work_sync(&ifmgd->tx_tspec_wk);
- sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
-
bss_conf->pwr_reduction = 0;
bss_conf->tx_pwr_env_num = 0;
memset(bss_conf->tx_pwr_env, 0, sizeof(bss_conf->tx_pwr_env));
@@ -2610,7 +2611,7 @@ static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- u8 *dst = ifmgd->bssid;
+ u8 *dst = sdata->deflink.u.mgd.bssid;
u8 unicast_limit = max(1, max_probe_tries - 3);
struct sta_info *sta;
@@ -2644,9 +2645,9 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ieee80211_send_nullfunc(sdata->local, sdata, false);
} else {
ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
- sdata->vif.bss_conf.ssid,
- sdata->vif.bss_conf.ssid_len,
- ifmgd->assoc_bss->channel);
+ sdata->vif.cfg.ssid,
+ sdata->vif.cfg.ssid_len,
+ sdata->deflink.u.mgd.bss->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -2736,7 +2737,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
sdata_assert_lock(sdata);
if (ifmgd->associated)
- cbss = ifmgd->assoc_bss;
+ cbss = sdata->deflink.u.mgd.bss;
else if (ifmgd->auth_data)
cbss = ifmgd->auth_data->bss;
else if (ifmgd->assoc_data)
@@ -2794,14 +2795,14 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
return;
}
- tx = !sdata->csa_block_tx;
+ tx = !sdata->deflink.csa_block_tx;
if (!ifmgd->driver_disconnect) {
/*
* AP is probably out of range (or not reachable for another
* reason) so remove the bss struct for that AP.
*/
- cfg80211_unlink_bss(local->hw.wiphy, ifmgd->assoc_bss);
+ cfg80211_unlink_bss(local->hw.wiphy, sdata->deflink.u.mgd.bss);
}
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
@@ -2810,12 +2811,12 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
tx, frame_buf);
mutex_lock(&local->mtx);
- sdata->vif.csa_active = false;
- ifmgd->csa_waiting_bcn = false;
- if (sdata->csa_block_tx) {
+ sdata->vif.bss_conf.csa_active = false;
+ sdata->deflink.u.mgd.csa_waiting_bcn = false;
+ if (sdata->deflink.csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
+ sdata->deflink.csa_block_tx = false;
}
mutex_unlock(&local->mtx);
@@ -2835,17 +2836,17 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (ifmgd->associated)
- ifmgd->beacon_loss_count++;
+ sdata->deflink.u.mgd.beacon_loss_count++;
if (ifmgd->connection_loss) {
sdata_info(sdata, "Connection to AP %pM lost\n",
- ifmgd->bssid);
+ sdata->deflink.u.mgd.bssid);
__ieee80211_disconnect(sdata);
ifmgd->connection_loss = false;
} else if (ifmgd->driver_disconnect) {
sdata_info(sdata,
"Driver requested disconnection from AP %pM\n",
- ifmgd->bssid);
+ sdata->deflink.u.mgd.bssid);
__ieee80211_disconnect(sdata);
ifmgd->driver_disconnect = false;
} else {
@@ -2918,11 +2919,11 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, auth_data->bss->bssid);
- eth_zero_addr(sdata->u.mgd.bssid);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+ eth_zero_addr(sdata->deflink.u.mgd.bssid);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
mutex_lock(&sdata->local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(&sdata->deflink);
mutex_unlock(&sdata->local->mtx);
}
@@ -2947,13 +2948,13 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
- eth_zero_addr(sdata->u.mgd.bssid);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+ eth_zero_addr(sdata->deflink.u.mgd.bssid);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
- sdata->vif.mu_mimo_owner = false;
+ sdata->vif.bss_conf.mu_mimo_owner = false;
mutex_lock(&sdata->local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(&sdata->deflink);
mutex_unlock(&sdata->local->mtx);
if (abandon)
@@ -3211,8 +3212,8 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
}
if (ifmgd->associated &&
- ether_addr_equal(mgmt->bssid, ifmgd->bssid)) {
- const u8 *bssid = ifmgd->bssid;
+ ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) {
+ const u8 *bssid = sdata->deflink.u.mgd.bssid;
sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
bssid, reason_code,
@@ -3254,7 +3255,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
return;
if (!ifmgd->associated ||
- !ether_addr_equal(mgmt->bssid, ifmgd->bssid))
+ !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid))
return;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -3427,7 +3428,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
goto out;
}
- sdata->vif.bss_conf.aid = aid;
+ sdata->vif.cfg.aid = aid;
ifmgd->tdls_chan_switch_prohibited =
elems->ext_capab && elems->ext_capab_len >= 5 &&
(elems->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED);
@@ -3565,11 +3566,13 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
/* Set up internal HT/VHT capabilities */
if (elems->ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
- elems->ht_cap_elem, sta);
+ elems->ht_cap_elem,
+ &sta->deflink);
if (elems->vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- elems->vht_cap_elem, sta);
+ elems->vht_cap_elem,
+ &sta->deflink);
if (elems->he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
elems->he_cap) {
@@ -3577,7 +3580,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
- sta);
+ &sta->deflink);
bss_conf->he_support = sta->sta.deflink.he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
@@ -3597,7 +3600,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
elems->he_cap_len,
elems->eht_cap,
elems->eht_cap_len,
- sta);
+ &sta->deflink);
bss_conf->eht_support = sta->sta.deflink.eht_cap.has_eht;
} else {
@@ -3715,8 +3718,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* that effect because the AP values is an unsigned
* 4-bit value.
*/
- ifmgd->wmm_last_param_set = -1;
- ifmgd->mu_edca_last_param_set = -1;
+ sdata->deflink.u.mgd.wmm_last_param_set = -1;
+ sdata->deflink.u.mgd.mu_edca_last_param_set = -1;
if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
ieee80211_set_wmm_default(sdata, false, false);
@@ -3964,7 +3967,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
if (ifmgd->associated &&
- ether_addr_equal(mgmt->bssid, ifmgd->bssid))
+ ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid))
ieee80211_reset_ap_probe(sdata);
}
@@ -4002,20 +4005,21 @@ static void ieee80211_handle_beacon_sig(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
- ewma_beacon_signal_init(&ifmgd->ave_beacon_signal);
- ifmgd->last_cqm_event_signal = 0;
- ifmgd->count_beacon_signal = 1;
- ifmgd->last_ave_beacon_signal = 0;
+ ewma_beacon_signal_init(&sdata->deflink.u.mgd.ave_beacon_signal);
+ sdata->deflink.u.mgd.last_cqm_event_signal = 0;
+ sdata->deflink.u.mgd.count_beacon_signal = 1;
+ sdata->deflink.u.mgd.last_ave_beacon_signal = 0;
} else {
- ifmgd->count_beacon_signal++;
+ sdata->deflink.u.mgd.count_beacon_signal++;
}
- ewma_beacon_signal_add(&ifmgd->ave_beacon_signal, -rx_status->signal);
+ ewma_beacon_signal_add(&sdata->deflink.u.mgd.ave_beacon_signal,
+ -rx_status->signal);
if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
- ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
- int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
- int last_sig = ifmgd->last_ave_beacon_signal;
+ sdata->deflink.u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
+ int sig = -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal);
+ int last_sig = sdata->deflink.u.mgd.last_ave_beacon_signal;
struct ieee80211_event event = {
.type = RSSI_EVENT,
};
@@ -4026,36 +4030,36 @@ static void ieee80211_handle_beacon_sig(struct ieee80211_sub_if_data *sdata,
*/
if (sig > ifmgd->rssi_max_thold &&
(last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
- ifmgd->last_ave_beacon_signal = sig;
+ sdata->deflink.u.mgd.last_ave_beacon_signal = sig;
event.u.rssi.data = RSSI_EVENT_HIGH;
drv_event_callback(local, sdata, &event);
} else if (sig < ifmgd->rssi_min_thold &&
(last_sig >= ifmgd->rssi_max_thold ||
last_sig == 0)) {
- ifmgd->last_ave_beacon_signal = sig;
+ sdata->deflink.u.mgd.last_ave_beacon_signal = sig;
event.u.rssi.data = RSSI_EVENT_LOW;
drv_event_callback(local, sdata, &event);
}
}
if (bss_conf->cqm_rssi_thold &&
- ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
+ sdata->deflink.u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
!(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
- int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
- int last_event = ifmgd->last_cqm_event_signal;
+ int sig = -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal);
+ int last_event = sdata->deflink.u.mgd.last_cqm_event_signal;
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
if (sig < thold &&
(last_event == 0 || sig < last_event - hyst)) {
- ifmgd->last_cqm_event_signal = sig;
+ sdata->deflink.u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
sig, GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
- ifmgd->last_cqm_event_signal = sig;
+ sdata->deflink.u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
@@ -4064,22 +4068,22 @@ static void ieee80211_handle_beacon_sig(struct ieee80211_sub_if_data *sdata,
}
if (bss_conf->cqm_rssi_low &&
- ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
- int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
- int last_event = ifmgd->last_cqm_event_signal;
+ sdata->deflink.u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
+ int sig = -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal);
+ int last_event = sdata->deflink.u.mgd.last_cqm_event_signal;
int low = bss_conf->cqm_rssi_low;
int high = bss_conf->cqm_rssi_high;
if (sig < low &&
(last_event == 0 || last_event >= low)) {
- ifmgd->last_cqm_event_signal = sig;
+ sdata->deflink.u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
sig, GFP_KERNEL);
} else if (sig > high &&
(last_event == 0 || last_event <= high)) {
- ifmgd->last_cqm_event_signal = sig;
+ sdata->deflink.u.mgd.last_cqm_event_signal = sig;
ieee80211_cqm_rssi_notify(
&sdata->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
@@ -4104,6 +4108,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
struct ieee80211_mgmt *mgmt = (void *) hdr;
size_t baselen;
struct ieee802_11_elems *elems;
@@ -4136,7 +4141,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
return;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
return;
@@ -4161,8 +4166,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
if (elems->dtim_period)
- ifmgd->dtim_period = elems->dtim_period;
- ifmgd->have_beacon = true;
+ sdata->deflink.u.mgd.dtim_period = elems->dtim_period;
+ sdata->deflink.u.mgd.have_beacon = true;
ifmgd->assoc_data->need_beacon = false;
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
sdata->vif.bss_conf.sync_tsf =
@@ -4193,9 +4198,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(bssid, ifmgd->assoc_bss))
+ !ieee80211_rx_our_beacon(bssid, sdata->deflink.u.mgd.bss))
return;
- bssid = ifmgd->bssid;
+ bssid = sdata->deflink.u.mgd.bssid;
if (!(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL))
ieee80211_handle_beacon_sig(sdata, ifmgd, bss_conf,
@@ -4228,7 +4233,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ncrc = elems->crc;
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
- ieee80211_check_tim(elems->tim, elems->tim_len, bss_conf->aid)) {
+ ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid)) {
if (local->hw.conf.dynamic_ps_timeout > 0) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
@@ -4261,27 +4266,27 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *) &noa, sizeof(noa));
if (ret >= 2) {
- if (sdata->u.mgd.p2p_noa_index != noa.index) {
+ if (sdata->deflink.u.mgd.p2p_noa_index != noa.index) {
/* valid noa_attr and index changed */
- sdata->u.mgd.p2p_noa_index = noa.index;
+ sdata->deflink.u.mgd.p2p_noa_index = noa.index;
memcpy(&bss_conf->p2p_noa_attr, &noa, sizeof(noa));
changed |= BSS_CHANGED_P2P_PS;
/*
* make sure we update all information, the CRC
* mechanism doesn't look at P2P attributes.
*/
- ifmgd->beacon_crc_valid = false;
+ sdata->deflink.u.mgd.beacon_crc_valid = false;
}
- } else if (sdata->u.mgd.p2p_noa_index != -1) {
+ } else if (sdata->deflink.u.mgd.p2p_noa_index != -1) {
/* noa_attr not found and we had valid noa_attr before */
- sdata->u.mgd.p2p_noa_index = -1;
+ sdata->deflink.u.mgd.p2p_noa_index = -1;
memset(&bss_conf->p2p_noa_attr, 0, sizeof(bss_conf->p2p_noa_attr));
changed |= BSS_CHANGED_P2P_PS;
- ifmgd->beacon_crc_valid = false;
+ sdata->deflink.u.mgd.beacon_crc_valid = false;
}
}
- if (ifmgd->csa_waiting_bcn)
+ if (sdata->deflink.u.mgd.csa_waiting_bcn)
ieee80211_chswitch_post_beacon(sdata);
/*
@@ -4301,11 +4306,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.sync_dtim_count = elems->dtim_count;
}
- if ((ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) ||
+ if ((ncrc == sdata->deflink.u.mgd.beacon_crc && sdata->deflink.u.mgd.beacon_crc_valid) ||
ieee80211_is_s1g_short_beacon(mgmt->frame_control))
goto free;
- ifmgd->beacon_crc = ncrc;
- ifmgd->beacon_crc_valid = true;
+ sdata->deflink.u.mgd.beacon_crc = ncrc;
+ sdata->deflink.u.mgd.beacon_crc_valid = true;
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
@@ -4323,12 +4328,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
* If we haven't had a beacon before, tell the driver about the
* DTIM period (and beacon timing if desired) now.
*/
- if (!ifmgd->have_beacon) {
+ if (!sdata->deflink.u.mgd.have_beacon) {
/* a few bogus APs send dtim_period = 0 or no TIM IE */
bss_conf->dtim_period = elems->dtim_period ?: 1;
changed |= BSS_CHANGED_BEACON_INFO;
- ifmgd->have_beacon = true;
+ sdata->deflink.u.mgd.have_beacon = true;
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local);
@@ -4374,7 +4379,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (sta && elems->opmode_notif)
- ieee80211_vht_handle_opmode(sdata, sta, *elems->opmode_notif,
+ ieee80211_vht_handle_opmode(sdata,
+ &sta->deflink,
+ *elems->opmode_notif,
rx_status->band);
mutex_unlock(&local->sta_mtx);
@@ -4384,7 +4391,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
elems->pwr_constr_elem,
elems->cisco_dtpc_elem);
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_link_info_change_notify(sdata, 0, changed);
free:
kfree(elems);
}
@@ -4724,7 +4731,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started &&
time_after(jiffies, ifmgd->assoc_data->timeout)) {
- if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) ||
+ if ((ifmgd->assoc_data->need_beacon &&
+ !sdata->deflink.u.mgd.have_beacon) ||
ieee80211_do_assoc(sdata)) {
struct cfg80211_bss *bss = ifmgd->assoc_data->bss;
struct ieee80211_event event = {
@@ -4742,7 +4750,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL &&
ifmgd->associated) {
- u8 *bssid = ifmgd->bssid;
+ u8 *bssid = sdata->deflink.u.mgd.bssid;
int max_tries;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
@@ -4803,9 +4811,9 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
from_timer(sdata, t, u.mgd.bcn_mon_timer);
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
+ if (sdata->vif.bss_conf.csa_active &&
+ !sdata->deflink.u.mgd.csa_waiting_bcn)
return;
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
@@ -4825,10 +4833,11 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
struct sta_info *sta;
unsigned long timeout;
- if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
+ if (sdata->vif.bss_conf.csa_active &&
+ !sdata->deflink.u.mgd.csa_waiting_bcn)
return;
- sta = sta_info_get(sdata, ifmgd->bssid);
+ sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
if (!sta)
return;
@@ -4924,7 +4933,7 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
.bssid = bssid,
};
- memcpy(bssid, ifmgd->bssid, ETH_ALEN);
+ memcpy(bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
ieee80211_mgd_deauth(sdata, &req);
}
@@ -4977,7 +4986,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_beacon_connection_loss_work);
INIT_WORK(&ifmgd->csa_connection_drop_work,
ieee80211_csa_connection_drop_work);
- INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_mgd_work);
+ INIT_WORK(&sdata->deflink.u.mgd.request_smps_work,
+ ieee80211_request_smps_mgd_work);
INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
ieee80211_tdls_peer_del_work);
timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
@@ -4991,12 +5001,12 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ifmgd->powersave = sdata->wdev.ps;
ifmgd->uapsd_queues = sdata->local->hw.uapsd_queues;
ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len;
- ifmgd->p2p_noa_index = -1;
+ sdata->deflink.u.mgd.p2p_noa_index = -1;
if (sdata->local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS)
- ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
+ sdata->deflink.u.mgd.req_smps = IEEE80211_SMPS_AUTOMATIC;
else
- ifmgd->req_smps = IEEE80211_SMPS_OFF;
+ sdata->deflink.u.mgd.req_smps = IEEE80211_SMPS_OFF;
/* Setup TDLS data */
spin_lock_init(&ifmgd->teardown_lock);
@@ -5469,8 +5479,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
s1g_oper,
&chandef, false);
- sdata->needed_rx_chains = min(ieee80211_max_rx_chains(sdata, cbss),
- local->rx_chains);
+ sdata->deflink.needed_rx_chains =
+ min(ieee80211_max_rx_chains(sdata, cbss), local->rx_chains);
rcu_read_unlock();
/* the element data was RCU protected so no longer valid anyway */
@@ -5483,7 +5493,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
/* will change later if needed */
- sdata->smps_mode = IEEE80211_SMPS_OFF;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
mutex_lock(&local->mtx);
/*
@@ -5491,8 +5501,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
* on incompatible channels, e.g. 80+80 and 160 sharing the
* same control channel) try to use a smaller bandwidth.
*/
- ret = ieee80211_vif_use_channel(sdata, &chandef,
- IEEE80211_CHANCTX_SHARED);
+ ret = ieee80211_link_use_channel(&sdata->deflink, &chandef,
+ IEEE80211_CHANCTX_SHARED);
/* don't downgrade for 5 and 10 MHz channels, though. */
if (chandef.width == NL80211_CHAN_WIDTH_5 ||
@@ -5501,8 +5511,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
ifmgd->flags |= ieee80211_chandef_downgrade(&chandef);
- ret = ieee80211_vif_use_channel(sdata, &chandef,
- IEEE80211_CHANCTX_SHARED);
+ ret = ieee80211_link_use_channel(&sdata->deflink, &chandef,
+ IEEE80211_CHANCTX_SHARED);
}
out:
mutex_unlock(&local->mtx);
@@ -5571,7 +5581,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
}
if (!have_sta) {
- new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
+ new_sta = sta_info_alloc(sdata, cbss->bssid, -1, GFP_KERNEL);
if (!new_sta)
return -ENOMEM;
}
@@ -5647,7 +5657,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
skip_rates:
- memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN);
+ memcpy(sdata->deflink.u.mgd.bssid, cbss->bssid, ETH_ALEN);
/* set timing information */
sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
@@ -5691,7 +5701,7 @@ skip_rates:
* tell driver about BSSID, basic rates and timing
* this was set up above, before setting the channel
*/
- ieee80211_bss_info_change_notify(sdata,
+ ieee80211_link_info_change_notify(sdata, 0,
BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_BEACON_INT);
@@ -5707,7 +5717,7 @@ skip_rates:
return err;
}
} else
- WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid));
+ WARN_ON_ONCE(!ether_addr_equal(sdata->deflink.u.mgd.bssid, cbss->bssid));
/* Cancel scan to ensure that nothing interferes with connection */
if (local->scanning)
@@ -5830,7 +5840,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"disconnect from AP %pM for new auth to %pM\n",
- ifmgd->bssid, req->bss->bssid);
+ sdata->deflink.u.mgd.bssid, req->bss->bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
@@ -5858,11 +5868,11 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return 0;
err_clear:
- eth_zero_addr(ifmgd->bssid);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+ eth_zero_addr(sdata->deflink.u.mgd.bssid);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_BSSID);
ifmgd->auth_data = NULL;
mutex_lock(&sdata->local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(&sdata->deflink);
mutex_unlock(&sdata->local->mtx);
kfree(auth_data);
return err;
@@ -5879,7 +5889,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data;
const struct cfg80211_bss_ies *beacon_ies;
struct ieee80211_supported_band *sband;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
const struct element *ssid_elem, *ht_elem, *vht_elem;
int i, err;
bool override = false;
@@ -5897,8 +5907,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen);
assoc_data->ssid_len = ssid_elem->datalen;
- memcpy(bss_conf->ssid, assoc_data->ssid, assoc_data->ssid_len);
- bss_conf->ssid_len = assoc_data->ssid_len;
+ memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
+ vif_cfg->ssid_len = assoc_data->ssid_len;
rcu_read_unlock();
if (ifmgd->associated) {
@@ -5906,7 +5916,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"disconnect from AP %pM for new assoc to %pM\n",
- ifmgd->bssid, req->bss->bssid);
+ sdata->deflink.u.mgd.bssid, req->bss->bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
@@ -5931,13 +5941,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
bool match;
/* keep sta info, bssid if matching */
- match = ether_addr_equal(ifmgd->bssid, req->bss->bssid);
+ match = ether_addr_equal(sdata->deflink.u.mgd.bssid,
+ req->bss->bssid);
ieee80211_destroy_auth_data(sdata, match);
}
/* prepare assoc data */
- ifmgd->beacon_crc_valid = false;
+ sdata->deflink.u.mgd.beacon_crc_valid = false;
assoc_data->wmm = bss->wmm_used &&
(local->hw.queues >= IEEE80211_NUM_ACS);
@@ -6071,14 +6082,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sdata->control_port_over_nl80211 =
req->crypto.control_port_over_nl80211;
sdata->control_port_no_preauth = req->crypto.control_port_no_preauth;
- sdata->encrypt_headroom = ieee80211_cs_headroom(local, &req->crypto,
- sdata->vif.type);
/* kick off associate process */
ifmgd->assoc_data = assoc_data;
- ifmgd->dtim_period = 0;
- ifmgd->have_beacon = false;
+ sdata->deflink.u.mgd.dtim_period = 0;
+ sdata->deflink.u.mgd.have_beacon = false;
/* override HT/VHT configuration only if the AP and we support it */
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
@@ -6127,13 +6136,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (err)
goto err_clear;
- if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
+ if (sdata->deflink.u.mgd.req_smps == IEEE80211_SMPS_AUTOMATIC) {
if (ifmgd->powersave)
- sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_DYNAMIC;
else
- sdata->smps_mode = IEEE80211_SMPS_OFF;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
} else {
- sdata->smps_mode = ifmgd->req_smps;
+ sdata->deflink.smps_mode = sdata->deflink.u.mgd.req_smps;
}
rcu_read_lock();
@@ -6146,7 +6155,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
* should this be more if we miss one?
*/
sdata_info(sdata, "waiting for beacon from %pM\n",
- ifmgd->bssid);
+ sdata->deflink.u.mgd.bssid);
assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
assoc_data->timeout_started = true;
assoc_data->need_beacon = true;
@@ -6155,9 +6164,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
u8 dtim_count = 0;
ieee80211_get_dtim(beacon_ies, &dtim_count,
- &ifmgd->dtim_period);
+ &sdata->deflink.u.mgd.dtim_period);
- ifmgd->have_beacon = true;
+ sdata->deflink.u.mgd.have_beacon = true;
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
@@ -6206,8 +6215,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
return 0;
err_clear:
- eth_zero_addr(ifmgd->bssid);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+ eth_zero_addr(sdata->deflink.u.mgd.bssid);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_BSSID);
ifmgd->assoc_data = NULL;
err_free:
kfree(assoc_data);
@@ -6264,7 +6273,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
}
if (ifmgd->associated &&
- ether_addr_equal(ifmgd->bssid, req->bssid)) {
+ ether_addr_equal(sdata->deflink.u.mgd.bssid, req->bssid)) {
sdata_info(sdata,
"deauthenticating from %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
@@ -6285,7 +6294,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_disassoc_request *req)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 bssid[ETH_ALEN];
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -6295,7 +6303,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
* to cfg80211 while that's in a locked section already
* trying to tell us that the user wants to disconnect.
*/
- if (ifmgd->assoc_bss != req->bss)
+ if (sdata->deflink.u.mgd.bss != req->bss)
return -ENOLINK;
sdata_info(sdata,
@@ -6324,7 +6332,7 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
*/
cancel_work_sync(&ifmgd->monitor_work);
cancel_work_sync(&ifmgd->beacon_connection_loss_work);
- cancel_work_sync(&ifmgd->request_smps_work);
+ cancel_work_sync(&sdata->deflink.u.mgd.request_smps_work);
cancel_work_sync(&ifmgd->csa_connection_drop_work);
cancel_work_sync(&ifmgd->chswitch_work);
cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work);
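
The mlme.c hunks above follow one mechanical pattern: state that is meaningful per connected link (BSSID, beacon/DTIM bookkeeping, CQM/RSSI averages, SMPS request, CSA flags) moves from the interface-wide struct ieee80211_if_managed into the per-link area reached as sdata->deflink.u.mgd, and configuration notifications gain a link index (ieee80211_link_info_change_notify(sdata, 0, changed) for the default link). The toy program below is a minimal userspace model of that split, not kernel code; the names iface, if_mgd, link_mgd and link_info_change_notify are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Per-link managed-mode state: one instance per (future) MLO link. */
struct link_mgd {
	uint8_t bssid[ETH_ALEN];
	bool have_beacon;
	uint8_t dtim_period;
	uint32_t beacon_crc;
	bool beacon_crc_valid;
};

/* Interface-wide managed-mode state stays where it was. */
struct if_mgd {
	bool associated;
	bool powersave;
};

struct iface {
	struct if_mgd u_mgd;	  /* interface scope (was sdata->u.mgd) */
	struct link_mgd deflink;  /* default link (sdata->deflink.u.mgd) */
};

/* Change notification now names the link it applies to. */
static void link_info_change_notify(struct iface *ifc, unsigned int link_id,
				    unsigned int changed)
{
	(void)ifc;
	printf("link %u changed 0x%x\n", link_id, changed);
}

int main(void)
{
	struct iface ifc = { .u_mgd.associated = true };
	static const uint8_t ap[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };

	/* old code copied into ifmgd->bssid; new code targets the link */
	memcpy(ifc.deflink.bssid, ap, ETH_ALEN);
	ifc.deflink.have_beacon = true;

	link_info_change_notify(&ifc, 0, 0x1 /* BSSID-changed-style bit */);
	return 0;
}
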
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index f97cb4c453d3..0fd29d9c496c 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -4,6 +4,7 @@
*
* Copyright: (c) 2014 Czech Technical University in Prague
* (c) 2014 Volkswagen Group Research
+ * Copyright (C) 2022 Intel Corporation
* Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
* Funded by: Volkswagen Group Research
*/
@@ -59,7 +60,7 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
ocb_dbg(sdata, "Adding new OCB station %pM\n", addr);
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON_ONCE(!chanctx_conf)) {
rcu_read_unlock();
return;
@@ -68,7 +69,7 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
- sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
+ sta = sta_info_alloc(sdata, addr, -1, GFP_ATOMIC);
if (!sta)
return;
@@ -181,12 +182,12 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
return -EINVAL;
sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
- sdata->smps_mode = IEEE80211_SMPS_OFF;
- sdata->needed_rx_chains = sdata->local->rx_chains;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
+ sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
mutex_lock(&sdata->local->mtx);
- err = ieee80211_vif_use_channel(sdata, &setup->chandef,
- IEEE80211_CHANCTX_SHARED);
+ err = ieee80211_link_use_channel(sdata->link[0], &setup->chandef,
+ IEEE80211_CHANCTX_SHARED);
mutex_unlock(&sdata->local->mtx);
if (err)
return err;
@@ -228,7 +229,7 @@ int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB);
mutex_lock(&sdata->local->mtx);
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
mutex_unlock(&sdata->local->mtx);
skb_queue_purge(&sdata->skb_queue);
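
Both ocb.c and the mlme.c changes above also stop reading the interface-wide sdata->vif.chanctx_conf pointer and use the per-link copy in sdata->vif.bss_conf instead, acquiring channels through ieee80211_link_use_channel() on a specific link rather than on the vif. Below is a rough userspace model of that publish/read pattern, using C11 acquire/release atomics as a stand-in for the kernel's rcu_assign_pointer()/rcu_dereference(); every identifier in it is illustrative, not a mac80211 symbol.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Channel context shared by readers; in the kernel this is RCU-protected. */
struct chanctx_conf {
	int center_freq;
	int width_mhz;
};

/* Per-link BSS configuration now owns the chanctx pointer. */
struct link_conf {
	_Atomic(struct chanctx_conf *) chanctx_conf;
};

static int link_use_channel(struct link_conf *link, int freq, int width)
{
	struct chanctx_conf *conf = malloc(sizeof(*conf));

	if (!conf)
		return -1;
	conf->center_freq = freq;
	conf->width_mhz = width;
	/* publish: rcu_assign_pointer() analogue */
	atomic_store_explicit(&link->chanctx_conf, conf, memory_order_release);
	return 0;
}

int main(void)
{
	struct link_conf deflink = { 0 };
	struct chanctx_conf *conf;

	if (link_use_channel(&deflink, 5180, 80))
		return 1;

	/* read side: rcu_dereference() analogue */
	conf = atomic_load_explicit(&deflink.chanctx_conf,
				    memory_order_acquire);
	if (conf)
		printf("link 0 on %d MHz, %d MHz wide\n",
		       conf->center_freq, conf->width_mhz);
	free(conf);
	return 0;
}
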
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c5d2ab9df1e7..2ed4e2325914 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -8,7 +8,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2022 Intel Corporation
*/
#include <linux/export.h>
#include <net/mac80211.h>
@@ -118,8 +118,8 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
&sdata->state);
sdata->vif.bss_conf.enable_beacon = false;
- ieee80211_bss_info_change_notify(
- sdata, BSS_CHANGED_BEACON_ENABLED);
+ ieee80211_link_info_change_notify(
+ sdata, 0, BSS_CHANGED_BEACON_ENABLED);
}
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -155,8 +155,8 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
&sdata->state)) {
sdata->vif.bss_conf.enable_beacon = true;
- ieee80211_bss_info_change_notify(
- sdata, BSS_CHANGED_BEACON_ENABLED);
+ ieee80211_link_info_change_notify(
+ sdata, 0, BSS_CHANGED_BEACON_ENABLED);
}
}
mutex_unlock(&local->iflist_mtx);
@@ -785,7 +785,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
switch (sdata->vif.type) {
case NL80211_IFTYPE_ADHOC:
- if (!sdata->vif.bss_conf.ibss_joined)
+ if (!sdata->vif.cfg.ibss_joined)
need_offchan = true;
#ifdef CONFIG_MAC80211_MESH
fallthrough;
@@ -800,7 +800,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
case NL80211_IFTYPE_P2P_GO:
if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
!ieee80211_vif_is_mesh(&sdata->vif) &&
- !rcu_access_pointer(sdata->bss->beacon))
+ !sdata->bss->active)
need_offchan = true;
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
@@ -819,7 +819,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!sdata->u.mgd.associated ||
(params->offchan && params->wait &&
local->ops->remain_on_channel &&
- memcmp(sdata->u.mgd.bssid,
+ memcmp(sdata->deflink.u.mgd.bssid,
mgmt->bssid, ETH_ALEN)))
need_offchan = true;
sdata_unlock(sdata);
@@ -845,7 +845,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_chanctx_conf *chanctx_conf;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (chanctx_conf) {
need_offchan = params->chan &&
@@ -876,7 +876,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
data = skb_put_data(skb, params->buf, params->len);
/* Update CSA counters */
- if (sdata->vif.csa_active &&
+ if (sdata->vif.bss_conf.csa_active &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
@@ -887,7 +887,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
rcu_read_lock();
if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->u.ap.beacon);
+ beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index ae9700e0a1a5..7947e9a162a9 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -4,6 +4,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2017 Intel Deutschland GmbH
+ * Copyright (C) 2022 Intel Corporation
*/
#include <linux/kernel.h>
@@ -36,14 +37,14 @@ void rate_control_rate_init(struct sta_info *sta)
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
- ieee80211_sta_set_rx_nss(sta);
+ ieee80211_sta_set_rx_nss(&sta->deflink);
if (!ref)
return;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sta->sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return;
@@ -67,16 +68,18 @@ void rate_control_rate_init(struct sta_info *sta)
}
void rate_control_tx_status(struct ieee80211_local *local,
- struct ieee80211_supported_band *sband,
struct ieee80211_tx_status *st)
{
struct rate_control_ref *ref = local->rate_ctrl;
struct sta_info *sta = container_of(st->sta, struct sta_info, sta);
void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_supported_band *sband;
if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
return;
+ sband = local->hw.wiphy->bands[st->info->band];
+
spin_lock_bh(&sta->rate_ctrl_lock);
if (ref->ops->tx_status_ext)
ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st);
@@ -89,18 +92,21 @@ void rate_control_tx_status(struct ieee80211_local *local,
}
void rate_control_rate_update(struct ieee80211_local *local,
- struct ieee80211_supported_band *sband,
- struct sta_info *sta, u32 changed)
+ struct ieee80211_supported_band *sband,
+ struct sta_info *sta, unsigned int link_id,
+ u32 changed)
{
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
struct ieee80211_chanctx_conf *chanctx_conf;
+ WARN_ON(link_id != 0);
+
if (ref && ref->ops->rate_update) {
rcu_read_lock();
- chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sta->sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return;
@@ -112,6 +118,7 @@ void rate_control_rate_update(struct ieee80211_local *local,
spin_unlock_bh(&sta->rate_ctrl_lock);
rcu_read_unlock();
}
+
drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 79b44d3db171..d89c13584dc8 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -3,6 +3,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
+ * Copyright (C) 2022 Intel Corporation
*/
#ifndef IEEE80211_RATE_H
@@ -26,13 +27,14 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_rate_control *txrc);
void rate_control_tx_status(struct ieee80211_local *local,
- struct ieee80211_supported_band *sband,
struct ieee80211_tx_status *st);
void rate_control_rate_init(struct sta_info *sta);
void rate_control_rate_update(struct ieee80211_local *local,
- struct ieee80211_supported_band *sband,
- struct sta_info *sta, u32 changed);
+ struct ieee80211_supported_band *sband,
+ struct sta_info *sta,
+ unsigned int link_id,
+ u32 changed);
static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
struct sta_info *sta, gfp_t gfp)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1675f8cb87f1..304b9909f025 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -1009,43 +1009,20 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
return -1;
}
-static int ieee80211_get_keyid(struct sk_buff *skb,
- const struct ieee80211_cipher_scheme *cs)
+static int ieee80211_get_keyid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc;
- int hdrlen;
- int minlen;
- u8 key_idx_off;
- u8 key_idx_shift;
+ __le16 fc = hdr->frame_control;
+ int hdrlen = ieee80211_hdrlen(fc);
u8 keyid;
- fc = hdr->frame_control;
- hdrlen = ieee80211_hdrlen(fc);
-
- if (cs) {
- minlen = hdrlen + cs->hdr_len;
- key_idx_off = hdrlen + cs->key_idx_off;
- key_idx_shift = cs->key_idx_shift;
- } else {
- /* WEP, TKIP, CCMP and GCMP */
- minlen = hdrlen + IEEE80211_WEP_IV_LEN;
- key_idx_off = hdrlen + 3;
- key_idx_shift = 6;
- }
-
- if (unlikely(skb->len < minlen))
+ /* WEP, TKIP, CCMP and GCMP */
+ if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN))
return -EINVAL;
- skb_copy_bits(skb, key_idx_off, &keyid, 1);
-
- if (cs)
- keyid &= cs->key_idx_mask;
- keyid >>= key_idx_shift;
+ skb_copy_bits(skb, hdrlen + 3, &keyid, 1);
- /* cs could use more than the usual two bits for the keyid */
- if (unlikely(keyid >= NUM_DEFAULT_KEYS))
- return -EINVAL;
+ keyid >>= 6;
return keyid;
}
@@ -1588,8 +1565,12 @@ static void sta_ps_start(struct sta_info *sta)
for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
struct ieee80211_txq *txq = sta->sta.txq[tid];
+ struct txq_info *txqi = to_txq_info(txq);
- ieee80211_unschedule_txq(&local->hw, txq, false);
+ spin_lock(&local->active_txq_lock[txq->ac]);
+ if (!list_empty(&txqi->schedule_order))
+ list_del_init(&txqi->schedule_order);
+ spin_unlock(&local->active_txq_lock[txq->ac]);
if (txq_has_queue(txq))
set_bit(tid, &sta->txq_buffered_tids);
@@ -1895,11 +1876,11 @@ ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
if (rx->sta)
key = rcu_dereference(rx->sta->deflink.gtk[idx]);
if (!key)
- key = rcu_dereference(sdata->keys[idx]);
+ key = rcu_dereference(sdata->deflink.gtk[idx]);
if (!key && rx->sta)
key = rcu_dereference(rx->sta->deflink.gtk[idx2]);
if (!key)
- key = rcu_dereference(sdata->keys[idx2]);
+ key = rcu_dereference(sdata->deflink.gtk[idx2]);
return key;
}
@@ -1916,7 +1897,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_key *ptk_idx = NULL;
int mmie_keyidx = -1;
__le16 fc;
- const struct ieee80211_cipher_scheme *cs = NULL;
if (ieee80211_is_ext(hdr->frame_control))
return RX_CONTINUE;
@@ -1959,8 +1939,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
if (ieee80211_has_protected(fc) &&
!(status->flag & RX_FLAG_IV_STRIPPED)) {
- cs = rx->sta->cipher_scheme;
- keyid = ieee80211_get_keyid(rx->skb, cs);
+ keyid = ieee80211_get_keyid(rx->skb);
if (unlikely(keyid < 0))
return RX_DROP_UNUSABLE;
@@ -2015,7 +1994,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
rx->key = rcu_dereference(rx->sta->deflink.gtk[mmie_keyidx]);
}
if (!rx->key)
- rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
+ rx->key = rcu_dereference(rx->sdata->deflink.gtk[mmie_keyidx]);
} else if (!ieee80211_has_protected(fc)) {
/*
* The frame was not protected, so skip decryption. However, we
@@ -2031,7 +2010,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
key = ieee80211_rx_get_bigtk(rx, -1);
} else if (ieee80211_is_mgmt(fc) &&
is_multicast_ether_addr(hdr->addr1)) {
- key = rcu_dereference(rx->sdata->default_mgmt_key);
+ key = rcu_dereference(rx->sdata->deflink.default_mgmt_key);
} else {
if (rx->sta) {
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
@@ -2042,7 +2021,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
}
if (!key) {
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
- key = rcu_dereference(sdata->keys[i]);
+ key = rcu_dereference(sdata->deflink.gtk[i]);
if (key)
break;
}
@@ -2065,7 +2044,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
(status->flag & RX_FLAG_IV_STRIPPED))
return RX_CONTINUE;
- keyidx = ieee80211_get_keyid(rx->skb, cs);
+ keyidx = ieee80211_get_keyid(rx->skb);
if (unlikely(keyidx < 0))
return RX_DROP_UNUSABLE;
@@ -2076,7 +2055,10 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
/* if not found, try default key */
if (!rx->key) {
- rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
+ if (is_multicast_ether_addr(hdr->addr1))
+ rx->key = rcu_dereference(rx->sdata->deflink.gtk[keyidx]);
+ if (!rx->key)
+ rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
/*
* RSNA-protected unicast frames should always be
@@ -2131,7 +2113,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
result = ieee80211_crypto_gcmp_decrypt(rx);
break;
default:
- result = ieee80211_crypto_hw_decrypt(rx);
+ result = RX_DROP_UNUSABLE;
}
/* the hdr variable is invalid after the decrypt handlers */
@@ -2945,7 +2927,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
tailroom = IEEE80211_ENCRYPT_TAILROOM;
fwd_skb = skb_copy_expand(skb, local->tx_headroom +
- sdata->encrypt_headroom,
+ IEEE80211_ENCRYPT_HEADROOM,
tailroom, GFP_ATOMIC);
if (!fwd_skb)
goto out;
@@ -3146,8 +3128,8 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
return;
}
- if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
- !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
+ if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) ||
+ !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) {
/* Not from the current AP or not associated yet. */
return;
}
@@ -3165,7 +3147,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
resp = skb_put_zero(skb, 24);
memcpy(resp->da, mgmt->sa, ETH_ALEN);
memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
- memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
@@ -3192,7 +3174,7 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
return;
- if (rx->sdata->vif.csa_active)
+ if (rx->sdata->vif.bss_conf.csa_active)
return;
baselen = mgmt->u.beacon.variable - rx->skb->data;
@@ -3392,7 +3374,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
sband = rx->local->hw.wiphy->bands[status->band];
- rate_control_rate_update(local, sband, rx->sta,
+ rate_control_rate_update(local, sband, rx->sta, 0,
IEEE80211_RC_SMPS_CHANGED);
cfg80211_sta_opmode_change_notify(sdata->dev,
rx->sta->addr,
@@ -3414,11 +3396,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
max_bw = IEEE80211_STA_RX_BW_20;
else
- max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
+ max_bw = ieee80211_sta_cap_rx_bw(&rx->sta->deflink);
/* set cur_max_bandwidth and recalc sta bw */
rx->sta->deflink.cur_max_bandwidth = max_bw;
- new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
+ new_bw = ieee80211_sta_cur_vht_bw(&rx->sta->deflink);
if (rx->sta->sta.deflink.bandwidth == new_bw)
goto handled;
@@ -3426,10 +3408,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
rx->sta->sta.deflink.bandwidth = new_bw;
sband = rx->local->hw.wiphy->bands[status->band];
sta_opmode.bw =
- ieee80211_sta_rx_bw_to_chan_width(rx->sta);
+ ieee80211_sta_rx_bw_to_chan_width(&rx->sta->deflink);
sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
- rate_control_rate_update(local, sband, rx->sta,
+ rate_control_rate_update(local, sband, rx->sta, 0,
IEEE80211_RC_BW_CHANGED);
cfg80211_sta_opmode_change_notify(sdata->dev,
rx->sta->addr,
@@ -3449,7 +3431,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
if (!rx->sta)
break;
- if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
+ if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid))
break;
if (mgmt->u.action.u.ext_chan_switch.action_code !=
WLAN_PUB_ACTION_EXT_CHANSW_ANN)
@@ -3550,7 +3532,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
if (sdata->vif.type == NL80211_IFTYPE_STATION)
- bssid = sdata->u.mgd.bssid;
+ bssid = sdata->deflink.u.mgd.bssid;
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
bssid = sdata->u.ibss.bssid;
else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
@@ -4153,6 +4135,12 @@ EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
/* main receive path */
+static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
+{
+ return ether_addr_equal(raddr, addr) ||
+ is_broadcast_ether_addr(raddr);
+}
+
static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
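
With cipher-scheme support removed, ieee80211_get_keyid() above always assumes the WEP/TKIP/CCMP/GCMP layout: the 2-bit Key ID lives in the top two bits of the fourth IV byte, i.e. frame[hdrlen + 3] >> 6, after a minimum-length check. A standalone sketch of that extraction (with a caller-supplied header length instead of ieee80211_hdrlen()) might look like this:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WEP_IV_LEN 4	/* minimum IV bytes after the 802.11 header */

/*
 * Return the 2-bit key index of a protected frame, or -1 if the frame is
 * too short.  hdrlen would come from ieee80211_hdrlen() in mac80211; here
 * the caller simply passes it in.
 */
static int get_keyid(const uint8_t *frame, size_t len, size_t hdrlen)
{
	if (len < hdrlen + WEP_IV_LEN)
		return -1;

	/* Key ID occupies bits 6-7 of the fourth IV byte. */
	return frame[hdrlen + 3] >> 6;
}

int main(void)
{
	/* 24-byte data header followed by a CCMP header with Key ID 2. */
	uint8_t frame[32] = { 0 };
	size_t hdrlen = 24;

	frame[hdrlen + 3] = 0x20 | (2 << 6);	/* ExtIV set, keyid = 2 */

	printf("keyid = %d\n", get_keyid(frame, sizeof(frame), hdrlen));
	return 0;
}
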
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index b698756887eb..f80284eee055 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -177,7 +177,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
rcu_read_lock();
scan_sdata = rcu_dereference(local->scan_sdata);
if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION &&
- scan_sdata->vif.bss_conf.assoc &&
+ scan_sdata->vif.cfg.assoc &&
ieee80211_have_rx_timestamp(rx_status)) {
bss_meta.parent_tsf =
ieee80211_calculate_rx_timestamp(local, rx_status,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index e04a0905e941..c9852f71e8e1 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -64,6 +64,12 @@
* freed before they are done using it.
*/
+struct sta_link_alloc {
+ struct link_sta_info info;
+ struct ieee80211_link_sta sta;
+ struct rcu_head rcu_head;
+};
+
static const struct rhashtable_params sta_rht_params = {
.nelem_hint = 3, /* start small */
.automatic_shrinking = true,
@@ -73,6 +79,15 @@ static const struct rhashtable_params sta_rht_params = {
.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};
+static const struct rhashtable_params link_sta_rht_params = {
+ .nelem_hint = 3, /* start small */
+ .automatic_shrinking = true,
+ .head_offset = offsetof(struct link_sta_info, link_hash_node),
+ .key_offset = offsetof(struct link_sta_info, addr),
+ .key_len = ETH_ALEN,
+ .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
+};
+
/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
struct sta_info *sta)
@@ -81,6 +96,14 @@ static int sta_info_hash_del(struct ieee80211_local *local,
sta_rht_params);
}
+static int link_sta_info_hash_del(struct ieee80211_local *local,
+ struct link_sta_info *link_sta)
+{
+ return rhltable_remove(&local->link_sta_hash,
+ &link_sta->link_hash_node,
+ link_sta_rht_params);
+}
+
static void __cleanup_single_sta(struct sta_info *sta)
{
int ac, i;
@@ -210,6 +233,37 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
return NULL;
}
+struct rhlist_head *link_sta_info_hash_lookup(struct ieee80211_local *local,
+ const u8 *addr)
+{
+ return rhltable_lookup(&local->link_sta_hash, addr,
+ link_sta_rht_params);
+}
+
+struct link_sta_info *
+link_sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct rhlist_head *tmp;
+ struct link_sta_info *link_sta;
+
+ rcu_read_lock();
+ for_each_link_sta_info(local, addr, link_sta, tmp) {
+ struct sta_info *sta = link_sta->sta;
+
+ if (sta->sdata == sdata ||
+ (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
+ rcu_read_unlock();
+ /* this is safe as the caller must already hold
+ * another rcu read section or the mutex
+ */
+ return link_sta;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
const u8 *sta_addr, const u8 *vif_addr)
{
@@ -245,6 +299,38 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
return NULL;
}
+static void sta_info_free_link(struct link_sta_info *link_sta)
+{
+ free_percpu(link_sta->pcpu_rx_stats);
+}
+
+static void sta_remove_link(struct sta_info *sta, unsigned int link_id,
+ bool unhash)
+{
+ struct sta_link_alloc *alloc = NULL;
+ struct link_sta_info *link_sta;
+
+ link_sta = rcu_dereference_protected(sta->link[link_id],
+ lockdep_is_held(&sta->local->sta_mtx));
+
+ if (WARN_ON(!link_sta))
+ return;
+
+ if (unhash)
+ link_sta_info_hash_del(sta->local, link_sta);
+
+ if (link_sta != &sta->deflink)
+ alloc = container_of(link_sta, typeof(*alloc), info);
+
+ sta->sta.valid_links &= ~BIT(link_id);
+ RCU_INIT_POINTER(sta->link[link_id], NULL);
+ RCU_INIT_POINTER(sta->sta.link[link_id], NULL);
+ if (alloc) {
+ sta_info_free_link(&alloc->info);
+ kfree_rcu(alloc, rcu_head);
+ }
+}
+
/**
* sta_info_free - free STA
*
@@ -258,6 +344,15 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
*/
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
+ if (!(sta->sta.valid_links & BIT(i)))
+ continue;
+
+ sta_remove_link(sta, i, true);
+ }
+
/*
* If we had used sta_info_pre_move_state() then we might not
* have gone through the state transitions down again, so do
@@ -287,7 +382,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
#ifdef CONFIG_MAC80211_MESH
kfree(sta->mesh);
#endif
- free_percpu(sta->deflink.pcpu_rx_stats);
+
+ sta_info_free_link(&sta->deflink);
kfree(sta);
}
@@ -333,8 +429,45 @@ static int sta_prepare_rate_control(struct ieee80211_local *local,
return 0;
}
+static int sta_info_alloc_link(struct ieee80211_local *local,
+ struct link_sta_info *link_info,
+ gfp_t gfp)
+{
+ struct ieee80211_hw *hw = &local->hw;
+ int i;
+
+ if (ieee80211_hw_check(hw, USES_RSS)) {
+ link_info->pcpu_rx_stats =
+ alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
+ if (!link_info->pcpu_rx_stats)
+ return -ENOMEM;
+ }
+
+ link_info->rx_stats.last_rx = jiffies;
+ u64_stats_init(&link_info->rx_stats.syncp);
+
+ ewma_signal_init(&link_info->rx_stats_avg.signal);
+ ewma_avg_signal_init(&link_info->status_stats.avg_ack_signal);
+ for (i = 0; i < ARRAY_SIZE(link_info->rx_stats_avg.chain_signal); i++)
+ ewma_signal_init(&link_info->rx_stats_avg.chain_signal[i]);
+
+ return 0;
+}
+
+static void sta_info_add_link(struct sta_info *sta,
+ unsigned int link_id,
+ struct link_sta_info *link_info,
+ struct ieee80211_link_sta *link_sta)
+{
+ link_info->sta = sta;
+ link_info->link_id = link_id;
+ link_info->pub = link_sta;
+ rcu_assign_pointer(sta->link[link_id], link_info);
+ rcu_assign_pointer(sta->sta.link[link_id], link_sta);
+}
+
struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
- const u8 *addr, gfp_t gfp)
+ const u8 *addr, int link_id, gfp_t gfp)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_hw *hw = &local->hw;
@@ -345,11 +478,18 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
- if (ieee80211_hw_check(hw, USES_RSS)) {
- sta->deflink.pcpu_rx_stats =
- alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
- if (!sta->deflink.pcpu_rx_stats)
- goto free;
+ sta->local = local;
+ sta->sdata = sdata;
+
+ if (sta_info_alloc_link(local, &sta->deflink, gfp))
+ return NULL;
+
+ if (link_id >= 0) {
+ sta_info_add_link(sta, link_id, &sta->deflink,
+ &sta->sta.deflink);
+ sta->sta.valid_links = BIT(link_id);
+ } else {
+ sta_info_add_link(sta, 0, &sta->deflink, &sta->sta.deflink);
}
spin_lock_init(&sta->lock);
@@ -373,17 +513,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
memcpy(sta->addr, addr, ETH_ALEN);
memcpy(sta->sta.addr, addr, ETH_ALEN);
+ memcpy(sta->deflink.addr, addr, ETH_ALEN);
+ memcpy(sta->sta.deflink.addr, addr, ETH_ALEN);
sta->sta.max_rx_aggregation_subframes =
local->hw.max_rx_aggregation_subframes;
/* TODO link specific alloc and assignments for MLO Link STA */
- /* For non MLO STA, link info can be accessed either via deflink
- * or link[0]
- */
- sta->link[0] = &sta->deflink;
- sta->sta.link[0] = &sta->sta.deflink;
-
/* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only.
* The Tx path starts to use a key as soon as the key slot ptk_idx
* references to is not NULL. To not use the initial Rx-only key
@@ -393,11 +529,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX);
sta->ptk_idx = INVALID_PTK_KEYIDX;
- sta->local = local;
- sta->sdata = sdata;
- sta->deflink.rx_stats.last_rx = jiffies;
-
- u64_stats_init(&sta->deflink.rx_stats.syncp);
ieee80211_init_frag_cache(&sta->frags);
@@ -407,10 +538,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
sta->last_connected = ktime_get_seconds();
- ewma_signal_init(&sta->deflink.rx_stats_avg.signal);
- ewma_avg_signal_init(&sta->deflink.status_stats.avg_ack_signal);
- for (i = 0; i < ARRAY_SIZE(sta->deflink.rx_stats_avg.chain_signal); i++)
- ewma_signal_init(&sta->deflink.rx_stats_avg.chain_signal[i]);
if (local->ops->wake_tx_queue) {
void *txq_data;
@@ -432,11 +559,15 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (sta_prepare_rate_control(local, sta, gfp))
goto free_txq;
+ sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
skb_queue_head_init(&sta->ps_tx_buf[i]);
skb_queue_head_init(&sta->tx_filtered[i]);
- init_airtime_info(&sta->airtime[i], &local->airtime[i]);
+ sta->airtime[i].deficit = sta->airtime_weight;
+ atomic_set(&sta->airtime[i].aql_tx_pending, 0);
+ sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
+ sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
}
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
@@ -532,7 +663,7 @@ free_txq:
if (sta->sta.txq[0])
kfree(to_txq_info(sta->sta.txq[0]));
free:
- free_percpu(sta->deflink.pcpu_rx_stats);
+ sta_info_free_link(&sta->deflink);
#ifdef CONFIG_MAC80211_MESH
kfree(sta->mesh);
#endif
@@ -630,7 +761,7 @@ ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_P2P_PS);
}
}
@@ -1189,6 +1320,12 @@ int sta_info_init(struct ieee80211_local *local)
if (err)
return err;
+ err = rhltable_init(&local->link_sta_hash, &link_sta_rht_params);
+ if (err) {
+ rhltable_destroy(&local->sta_hash);
+ return err;
+ }
+
spin_lock_init(&local->tim_lock);
mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
@@ -1201,6 +1338,7 @@ void sta_info_stop(struct ieee80211_local *local)
{
del_timer_sync(&local->sta_cleanup);
rhltable_destroy(&local->sta_hash);
+ rhltable_destroy(&local->link_sta_hash);
}
@@ -1467,7 +1605,7 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
skb->dev = sdata->dev;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
kfree_skb(skb);
@@ -1901,59 +2039,29 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);
-void ieee80211_register_airtime(struct ieee80211_txq *txq,
- u32 tx_airtime, u32 rx_airtime)
+void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
+ u32 tx_airtime, u32 rx_airtime)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
- struct ieee80211_local *local = sdata->local;
- u64 weight_sum, weight_sum_reciprocal;
- struct airtime_sched_info *air_sched;
- struct airtime_info *air_info;
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_local *local = sta->sdata->local;
+ u8 ac = ieee80211_ac_from_tid(tid);
u32 airtime = 0;
+ u32 diff;
- air_sched = &local->airtime[txq->ac];
- air_info = to_airtime_info(txq);
-
- if (local->airtime_flags & AIRTIME_USE_TX)
+ if (sta->local->airtime_flags & AIRTIME_USE_TX)
airtime += tx_airtime;
- if (local->airtime_flags & AIRTIME_USE_RX)
+ if (sta->local->airtime_flags & AIRTIME_USE_RX)
airtime += rx_airtime;
- /* Weights scale so the unit weight is 256 */
- airtime <<= 8;
-
- spin_lock_bh(&air_sched->lock);
+ spin_lock_bh(&local->active_txq_lock[ac]);
+ sta->airtime[ac].tx_airtime += tx_airtime;
+ sta->airtime[ac].rx_airtime += rx_airtime;
- air_info->tx_airtime += tx_airtime;
- air_info->rx_airtime += rx_airtime;
+ diff = (u32)jiffies - sta->airtime[ac].last_active;
+ if (diff <= AIRTIME_ACTIVE_DURATION)
+ sta->airtime[ac].deficit -= airtime;
- if (air_sched->weight_sum) {
- weight_sum = air_sched->weight_sum;
- weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
- } else {
- weight_sum = air_info->weight;
- weight_sum_reciprocal = air_info->weight_reciprocal;
- }
-
- /* Round the calculation of global vt */
- air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
- weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
- air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
- air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
- ieee80211_resort_txq(&local->hw, txq);
-
- spin_unlock_bh(&air_sched->lock);
-}
-
-void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
- u32 tx_airtime, u32 rx_airtime)
-{
- struct ieee80211_txq *txq = pubsta->txq[tid];
-
- if (!txq)
- return;
-
- ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);
@@ -1972,6 +2080,7 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
&sta->airtime[ac].aql_tx_pending);
atomic_add(tx_airtime, &local->aql_total_pending_airtime);
+ atomic_add(tx_airtime, &local->aql_ac_pending_airtime[ac]);
return;
}
@@ -1983,14 +2092,17 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
tx_pending, 0);
}
+ atomic_sub(tx_airtime, &local->aql_total_pending_airtime);
tx_pending = atomic_sub_return(tx_airtime,
- &local->aql_total_pending_airtime);
+ &local->aql_ac_pending_airtime[ac]);
if (WARN_ONCE(tx_pending < 0,
"Device %s AC %d pending airtime underflow: %u, %u",
wiphy_name(local->hw.wiphy), ac, tx_pending,
- tx_airtime))
- atomic_cmpxchg(&local->aql_total_pending_airtime,
+ tx_airtime)) {
+ atomic_cmpxchg(&local->aql_ac_pending_airtime[ac],
tx_pending, 0);
+ atomic_sub(tx_pending, &local->aql_total_pending_airtime);
+ }
}
int sta_info_move_state(struct sta_info *sta,
@@ -2093,41 +2205,6 @@ int sta_info_move_state(struct sta_info *sta,
return 0;
}
-u8 sta_info_tx_streams(struct sta_info *sta)
-{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.deflink.ht_cap;
- u8 rx_streams;
-
- if (!sta->sta.deflink.ht_cap.ht_supported)
- return 1;
-
- if (sta->sta.deflink.vht_cap.vht_supported) {
- int i;
- u16 tx_mcs_map =
- le16_to_cpu(sta->sta.deflink.vht_cap.vht_mcs.tx_mcs_map);
-
- for (i = 7; i >= 0; i--)
- if ((tx_mcs_map & (0x3 << (i * 2))) !=
- IEEE80211_VHT_MCS_NOT_SUPPORTED)
- return i + 1;
- }
-
- if (ht_cap->mcs.rx_mask[3])
- rx_streams = 4;
- else if (ht_cap->mcs.rx_mask[2])
- rx_streams = 3;
- else if (ht_cap->mcs.rx_mask[1])
- rx_streams = 2;
- else
- rx_streams = 1;
-
- if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF))
- return rx_streams;
-
- return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
-}
-
static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
@@ -2312,7 +2389,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
* (or just modify the value entirely, of course)
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
+ sinfo->rx_beacon = sdata->deflink.u.mgd.count_beacon_signal;
drv_sta_statistics(local, sdata, &sta->sta, sinfo);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
@@ -2323,7 +2400,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
+ sinfo->beacon_loss_count =
+ sdata->deflink.u.mgd.beacon_loss_count;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
}
@@ -2401,7 +2479,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
- sinfo->airtime_weight = sta->airtime[0].weight;
+ sinfo->airtime_weight = sta->airtime_weight;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
}
@@ -2614,3 +2692,92 @@ void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
sta_update_codel_params(sta, thr);
}
+
+int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct sta_link_alloc *alloc;
+ int ret;
+
+ lockdep_assert_held(&sdata->local->sta_mtx);
+
+ /* must represent an MLD from the start */
+ if (WARN_ON(!sta->sta.valid_links))
+ return -EINVAL;
+
+ if (WARN_ON(sta->sta.valid_links & BIT(link_id) ||
+ sta->link[link_id]))
+ return -EBUSY;
+
+ alloc = kzalloc(sizeof(*alloc), GFP_KERNEL);
+ if (!alloc)
+ return -ENOMEM;
+
+ ret = sta_info_alloc_link(sdata->local, &alloc->info, GFP_KERNEL);
+ if (ret) {
+ kfree(alloc);
+ return ret;
+ }
+
+ sta_info_add_link(sta, link_id, &alloc->info, &alloc->sta);
+
+ return 0;
+}
+
+static int link_sta_info_hash_add(struct ieee80211_local *local,
+ struct link_sta_info *link_sta)
+{
+ return rhltable_insert(&local->link_sta_hash,
+ &link_sta->link_hash_node,
+ link_sta_rht_params);
+}
+
+int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct link_sta_info *link_sta;
+ u16 old_links = sta->sta.valid_links;
+ u16 new_links = old_links | BIT(link_id);
+ int ret;
+
+ link_sta = rcu_dereference_protected(sta->link[link_id],
+ lockdep_is_held(&sdata->local->sta_mtx));
+
+ if (WARN_ON(old_links == new_links || !link_sta))
+ return -EINVAL;
+
+ sta->sta.valid_links = new_links;
+
+ if (!test_sta_flag(sta, WLAN_STA_INSERTED)) {
+ ret = 0;
+ goto hash;
+ }
+
+ ret = drv_change_sta_links(sdata->local, sdata, &sta->sta,
+ old_links, new_links);
+ if (ret) {
+ sta->sta.valid_links = old_links;
+ sta_remove_link(sta, link_id, false);
+ }
+
+hash:
+ link_sta_info_hash_add(sdata->local, link_sta);
+
+ return ret;
+}
+
+void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
+ lockdep_assert_held(&sdata->local->sta_mtx);
+
+ sta->sta.valid_links &= ~BIT(link_id);
+
+ if (test_sta_flag(sta, WLAN_STA_INSERTED))
+ drv_change_sta_links(sdata->local, sdata, &sta->sta,
+ sta->sta.valid_links,
+ sta->sta.valid_links & ~BIT(link_id));
+
+ sta_remove_link(sta, link_id, true);
+}
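
The sta_info.c changes above introduce the first multi-link plumbing: each struct sta_info carries a valid_links bitmap, an array of per-link pointers (link[0] aliases the embedded deflink for non-MLD stations), allocate/activate/remove helpers, and a separate rhltable keyed by link address. The fragment below is a simplified userspace model of the bitmap-plus-pointer-array bookkeeping only; it leaves out the RCU and rhashtable parts, and every identifier in it is invented for the sketch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LINKS 16

struct link_sta {
	uint8_t addr[6];
	unsigned int link_id;
};

struct sta {
	uint16_t valid_links;		/* bitmap of allocated links */
	struct link_sta deflink;	/* link 0 for non-MLD stations */
	struct link_sta *link[MAX_LINKS];
};

static int sta_allocate_link(struct sta *sta, unsigned int link_id)
{
	struct link_sta *ls;

	if (link_id >= MAX_LINKS || sta->link[link_id])
		return -1;

	ls = calloc(1, sizeof(*ls));
	if (!ls)
		return -1;
	ls->link_id = link_id;
	sta->link[link_id] = ls;
	sta->valid_links |= 1U << link_id;
	return 0;
}

static void sta_remove_link(struct sta *sta, unsigned int link_id)
{
	struct link_sta *ls = sta->link[link_id];

	if (!ls)
		return;
	sta->valid_links &= ~(1U << link_id);
	sta->link[link_id] = NULL;
	if (ls != &sta->deflink)	/* deflink is embedded, not freed */
		free(ls);
}

int main(void)
{
	struct sta sta = { 0 };

	/* non-MLD case: link 0 points at the embedded deflink */
	sta.link[0] = &sta.deflink;
	sta.valid_links = 1U << 0;

	if (sta_allocate_link(&sta, 2))
		return 1;
	printf("valid_links = 0x%x\n", sta.valid_links);	/* 0x5 */

	sta_remove_link(&sta, 2);
	sta_remove_link(&sta, 0);
	printf("valid_links = 0x%x\n", sta.valid_links);	/* 0x0 */
	return 0;
}
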
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 35c390bedfba..70ee55ec5518 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2021 Intel Corporation
+ * Copyright(c) 2020-2022 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -135,25 +135,19 @@ enum ieee80211_agg_stop_reason {
#define AIRTIME_USE_TX BIT(0)
#define AIRTIME_USE_RX BIT(1)
-
struct airtime_info {
u64 rx_airtime;
u64 tx_airtime;
- u64 v_t;
- u64 last_scheduled;
- struct list_head list;
+ u32 last_active;
+ s32 deficit;
atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
u32 aql_limit_low;
u32 aql_limit_high;
- u32 weight_reciprocal;
- u16 weight;
};
void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
struct sta_info *sta, u8 ac,
u16 tx_airtime, bool tx_completed);
-void ieee80211_register_airtime(struct ieee80211_txq *txq,
- u32 tx_airtime, u32 rx_airtime);
struct sta_info;
@@ -491,6 +485,7 @@ struct ieee80211_fragment_cache {
* same for non-MLD STA. This is used as key for searching link STA
* @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD
* and set to the corresponding vif LinkId for MLD STA
+ * @link_hash_node: hash node for rhashtable
* @sta: Points to the STA info
* @gtk: group keys negotiated with this station, if any
* @tx_stats: TX statistics
@@ -516,13 +511,16 @@ struct ieee80211_fragment_cache {
* @status_stats.last_ack_signal: last ACK signal
* @status_stats.ack_signal_filled: last ACK signal validity
* @status_stats.avg_ack_signal: average ACK signal
+ * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
+ * taken from HT/VHT capabilities or VHT operating mode notification
+ * @pub: public (driver visible) link STA data
* TODO Move other link params from sta_info as required for MLD operation
*/
struct link_sta_info {
u8 addr[ETH_ALEN];
u8 link_id;
- /* TODO rhash head/node for finding link_sta based on addr */
+ struct rhlist_head link_hash_node;
struct sta_info *sta;
struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS +
@@ -561,6 +559,8 @@ struct link_sta_info {
} tx_stats;
enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
+
+ struct ieee80211_link_sta *pub;
};
/**
@@ -603,6 +603,7 @@ struct link_sta_info {
* @tid_seq: per-TID sequence numbers for sending to this STA
* @airtime: per-AC struct airtime_info describing airtime statistics for this
* station
+ * @airtime_weight: station weight for airtime fairness calculation purposes
* @ampdu_mlme: A-MPDU state machine state
* @mesh: mesh STA information
* @debugfs_dir: debug filesystem directory dentry
@@ -616,7 +617,6 @@ struct link_sta_info {
* taken from HT/VHT capabilities or VHT operating mode notification
* @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
* AP only.
- * @cipher_scheme: optional cipher scheme for this station
* @cparams: CoDel parameters for this station.
* @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
* @fast_tx: TX fastpath information
@@ -624,7 +624,6 @@ struct link_sta_info {
* @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
* the BSS one.
* @frags: fragment cache
- * @multi_link_sta: Identifies if this sta is a MLD STA or regular STA
* @deflink: This is the default link STA information, for non MLO STA all link
* specific STA information is accessed through @deflink or through
* link[0] which points to address of @deflink. For MLO Link STA
@@ -689,6 +688,7 @@ struct sta_info {
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
struct airtime_info airtime[IEEE80211_NUM_ACS];
+ u16 airtime_weight;
/*
* Aggregation information, locked with lock.
@@ -700,7 +700,6 @@ struct sta_info {
#endif
enum ieee80211_smps_mode known_smps_mode;
- const struct ieee80211_cipher_scheme *cipher_scheme;
struct codel_params cparams;
@@ -710,9 +709,8 @@ struct sta_info {
struct ieee80211_fragment_cache frags;
- bool multi_link_sta;
struct link_sta_info deflink;
- struct link_sta_info *link[MAX_STA_LINKS];
+ struct link_sta_info __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
/* keep last! */
struct ieee80211_sta sta;
@@ -825,6 +823,17 @@ struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
rhl_for_each_entry_rcu(_sta, _tmp, \
sta_info_hash_lookup(local, _addr), hash_node)
+struct rhlist_head *link_sta_info_hash_lookup(struct ieee80211_local *local,
+ const u8 *addr);
+
+#define for_each_link_sta_info(local, _addr, _sta, _tmp) \
+ rhl_for_each_entry_rcu(_sta, _tmp, \
+ link_sta_info_hash_lookup(local, _addr), \
+ link_hash_node)
+
+struct link_sta_info *
+link_sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr);
+
/*
* Get STA info by index, BROKEN!
*/
@@ -835,7 +844,7 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
* until sta_info_insert().
*/
struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
- const u8 *addr, gfp_t gfp);
+ const u8 *addr, int link_id, gfp_t gfp);
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta);
@@ -893,7 +902,10 @@ u32 sta_get_expected_throughput(struct sta_info *sta);
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
unsigned long exp_time);
-u8 sta_info_tx_streams(struct sta_info *sta);
+
+int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id);
+int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id);
+void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id);
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
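A minimal usage sketch of the new per-link lookup declared above (illustrative only, not part of the patch): the for_each_link_sta_info() iterator and the link_hash_node member come from the sta_info.h hunks, while the helper name and the assumption that the caller holds the RCU read lock are modelled on the existing sta_info hash lookup.

    /* Sketch: map a per-link MAC address back to its owning sta_info.
     * Assumes the caller holds rcu_read_lock(), as for sta_info lookups. */
    static struct sta_info *
    example_sta_by_link_addr(struct ieee80211_local *local, const u8 *addr)
    {
            struct link_sta_info *link_sta;
            struct rhlist_head *tmp;

            for_each_link_sta_info(local, addr, link_sta, tmp)
                    return link_sta->sta; /* first match is enough here */

            return NULL;
    }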
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index e69272139437..9bd4d336d444 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -223,7 +223,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
* only be the AP. And the only other place updating
* this variable in managed mode is before association.
*/
- sdata->smps_mode = smps_mode;
+ sdata->deflink.smps_mode = smps_mode;
ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
} else if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -293,7 +293,6 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info,
static void
ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
- struct ieee80211_supported_band *sband,
struct sk_buff *skb, int retry_count,
int rtap_len, int shift,
struct ieee80211_tx_status *status)
@@ -336,9 +335,13 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
legacy_rate = status_rate->rate_idx.legacy;
} else if (info->status.rates[0].idx >= 0 &&
!(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
- IEEE80211_TX_RC_VHT_MCS)))
+ IEEE80211_TX_RC_VHT_MCS))) {
+ struct ieee80211_supported_band *sband;
+
+ sband = local->hw.wiphy->bands[info->band];
legacy_rate =
sband->bitrates[info->status.rates[0].idx].bitrate;
+ }
if (legacy_rate) {
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE));
@@ -845,7 +848,6 @@ static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
}
void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_supported_band *sband,
int retry_count, int shift, bool send_to_cooked,
struct ieee80211_tx_status *status)
{
@@ -862,7 +864,7 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
dev_kfree_skb(skb);
return;
}
- ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
+ ieee80211_add_tx_radiotap_header(local, skb, retry_count,
rtap_len, shift, status);
/* XXX: is this sufficient for BPF? */
@@ -912,7 +914,6 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = status->info;
struct sta_info *sta;
__le16 fc;
- struct ieee80211_supported_band *sband;
bool send_to_cooked;
bool acked;
bool noack_success;
@@ -920,7 +921,6 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
- sband = local->hw.wiphy->bands[info->band];
fc = hdr->frame_control;
if (status->sta) {
@@ -998,25 +998,6 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
- } else if (wiphy_ext_feature_isset(local->hw.wiphy,
- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
- struct ieee80211_sub_if_data *sdata;
- struct ieee80211_txq *txq;
- u32 airtime;
-
- /* Account airtime to multicast queue */
- sdata = ieee80211_sdata_from_skb(local, skb);
-
- if (sdata && (txq = sdata->vif.txq)) {
- airtime = info->status.tx_time ?:
- ieee80211_calc_expected_tx_airtime(hw,
- &sdata->vif,
- NULL,
- skb->len,
- false);
-
- ieee80211_register_airtime(txq, airtime, 0);
- }
}
/* SNMP counters
@@ -1082,7 +1063,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
}
/* send to monitor interfaces */
- ieee80211_tx_monitor(local, skb, sband, retry_count, shift,
+ ieee80211_tx_monitor(local, skb, retry_count, shift,
send_to_cooked, status);
}
@@ -1114,7 +1095,6 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = status->info;
struct ieee80211_sta *pubsta = status->sta;
struct sk_buff *skb = status->skb;
- struct ieee80211_supported_band *sband;
struct sta_info *sta = NULL;
int rates_idx, retry_count;
bool acked, noack_success, ack_signal_valid;
@@ -1145,8 +1125,6 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
- sband = hw->wiphy->bands[info->band];
-
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
ack_signal_valid =
@@ -1201,7 +1179,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
}
}
- rate_control_tx_status(local, sband, status);
+ rate_control_tx_status(local, status);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
ieee80211s_update_metric(local, sta, status);
}
@@ -1239,14 +1217,13 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_supported_band *sband = hw->wiphy->bands[info->band];
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_tx_status status = {
.info = info,
.sta = pubsta,
};
- rate_control_tx_status(local, sband, &status);
+ rate_control_tx_status(local, &status);
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
sta->deflink.tx_stats.last_rate = info->status.rates[0];
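The status.c hunks drop the ieee80211_supported_band argument from the TX-status path; callees that still need the band table now derive it from the frame's TX info on demand. A small sketch of that pattern (the helper name is hypothetical; the field accesses mirror the radiotap-header hunk above):

    static u16 example_legacy_bitrate(struct ieee80211_local *local,
                                      struct ieee80211_tx_info *info)
    {
            struct ieee80211_supported_band *sband;

            /* only legacy (non-MCS) rate indices map into sband->bitrates[] */
            if (info->status.rates[0].idx < 0 ||
                (info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
                                                IEEE80211_TX_RC_VHT_MCS)))
                    return 0;

            /* look the band table up here instead of passing it down */
            sband = local->hw.wiphy->bands[info->band];
            if (!sband)
                    return 0;

            return sband->bitrates[info->status.rates[0].idx].bitrate;
    }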
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 4e2d22e47429..c531fa17f426 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -6,7 +6,7 @@
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
* Copyright 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2019, 2021 Intel Corporation
+ * Copyright (C) 2019, 2021-2022 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -218,7 +218,7 @@ static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata,
lnkid->ie_type = WLAN_EID_LINK_ID;
lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
- memcpy(lnkid->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(lnkid->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(lnkid->init_sta, init_addr, ETH_ALEN);
memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN);
}
@@ -230,7 +230,7 @@ ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
*pos++ = WLAN_EID_AID;
*pos++ = 2; /* len */
- put_unaligned_le16(sdata->vif.bss_conf.aid, pos);
+ put_unaligned_le16(sdata->vif.cfg.aid, pos);
}
/* translate numbering in the WMM parameter IE to the mac80211 notation */
@@ -308,7 +308,8 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
/* IEEE802.11ac-2013 Table E-4 */
u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
struct cfg80211_chan_def uc = sta->tdls_chandef;
- enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta);
+ enum nl80211_chan_width max_width =
+ ieee80211_sta_cap_chan_bw(&sta->deflink);
int i;
/* only support upgrading non-narrow channels up to 80Mhz */
@@ -545,7 +546,6 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
size_t extra_ies_len)
{
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
size_t offset = 0, noffset;
struct sta_info *sta, *ap_sta;
struct ieee80211_supported_band *sband;
@@ -558,7 +558,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
- ap_sta = sta_info_get(sdata, ifmgd->bssid);
+ ap_sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
if (WARN_ON_ONCE(!sta || !ap_sta)) {
mutex_unlock(&local->sta_mtx);
return;
@@ -833,7 +833,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, peer, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -1072,7 +1072,8 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode;
+ enum ieee80211_smps_mode smps_mode =
+ sdata->deflink.u.mgd.driver_smps_mode;
int ret;
/* don't support setup with forced SMPS mode that's not off */
@@ -1254,7 +1255,7 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
mutex_lock(&local->chanctx_mtx);
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (conf) {
width = conf->def.width;
@@ -1268,10 +1269,10 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth bw;
bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
- bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+ bw = min(bw, ieee80211_sta_cap_rx_bw(&sta->deflink));
if (bw != sta->sta.deflink.bandwidth) {
sta->sta.deflink.bandwidth = bw;
- rate_control_rate_update(local, sband, sta,
+ rate_control_rate_update(local, sband, sta, 0,
IEEE80211_RC_BW_CHANGED);
/*
* if a TDLS peer BW was updated, we need to
@@ -1335,7 +1336,7 @@ iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
return;
sdata->vif.bss_conf.ht_operation_mode = opmode;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+ ieee80211_link_info_change_notify(sdata, 0, BSS_CHANGED_HT);
}
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
@@ -1372,7 +1373,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
switch (oper) {
case NL80211_TDLS_ENABLE_LINK:
- if (sdata->vif.csa_active) {
+ if (sdata->vif.bss_conf.csa_active) {
tdls_dbg(sdata, "TDLS: disallow link during CSA\n");
ret = -EBUSY;
break;
@@ -1431,7 +1432,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
if (ret == 0)
ieee80211_queue_work(&sdata->local->hw,
- &sdata->u.mgd.request_smps_work);
+ &sdata->deflink.u.mgd.request_smps_work);
mutex_unlock(&local->mtx);
sdata_unlock(sdata);
@@ -1444,7 +1445,7 @@ void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) {
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) {
sdata_err(sdata, "Discarding TDLS oper %d - not STA or disconnected\n",
oper);
return;
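In the tdls.c hunks, association state moves to the per-interface sdata->vif.cfg while the managed-mode BSSID and SMPS state move to the default link (sdata->deflink.u.mgd). A hypothetical helper showing the two access paths side by side (a sketch only, not code from this patch):

    static const u8 *example_tdls_ap_bssid(struct ieee80211_sub_if_data *sdata)
    {
            /* association is per-interface state ... */
            if (sdata->vif.type != NL80211_IFTYPE_STATION ||
                !sdata->vif.cfg.assoc)
                    return NULL;

            /* ... but the BSSID is per-link state on the default link */
            return sdata->deflink.u.mgd.bssid;
    }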
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 743adfbb9b15..f96e7cdca4c2 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
-* Portions of this file
-* Copyright(c) 2016-2017 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2021 Intel Corporation
-*/
+ * Portions of this file
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 - 2022 Intel Corporation
+ */
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __MAC80211_DRIVER_TRACE
@@ -390,22 +390,71 @@ TRACE_EVENT(drv_config,
)
);
-TRACE_EVENT(drv_bss_info_changed,
+TRACE_EVENT(drv_vif_cfg_changed,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- struct ieee80211_bss_conf *info,
- u32 changed),
+ u64 changed),
- TP_ARGS(local, sdata, info, changed),
+ TP_ARGS(local, sdata, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
- __field(u32, changed)
+ __field(u64, changed)
__field(bool, assoc)
__field(bool, ibss_joined)
__field(bool, ibss_creator)
__field(u16, aid)
+ __dynamic_array(u32, arp_addr_list,
+ sdata->vif.cfg.arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
+ IEEE80211_BSS_ARP_ADDR_LIST_LEN :
+ sdata->vif.cfg.arp_addr_cnt)
+ __field(int, arp_addr_cnt)
+ __dynamic_array(u8, ssid, sdata->vif.cfg.ssid_len)
+ __field(int, s1g)
+ __field(bool, idle)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->changed = changed;
+ __entry->aid = sdata->vif.cfg.aid;
+ __entry->assoc = sdata->vif.cfg.assoc;
+ __entry->ibss_joined = sdata->vif.cfg.ibss_joined;
+ __entry->ibss_creator = sdata->vif.cfg.ibss_creator;
+
+ __entry->arp_addr_cnt = sdata->vif.cfg.arp_addr_cnt;
+ memcpy(__get_dynamic_array(arp_addr_list),
+ sdata->vif.cfg.arp_addr_list,
+ sizeof(u32) * (sdata->vif.cfg.arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
+ IEEE80211_BSS_ARP_ADDR_LIST_LEN :
+ sdata->vif.cfg.arp_addr_cnt));
+ memcpy(__get_dynamic_array(ssid),
+ sdata->vif.cfg.ssid,
+ sdata->vif.cfg.ssid_len);
+ __entry->s1g = sdata->vif.cfg.s1g;
+ __entry->idle = sdata->vif.cfg.idle;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " changed:%#llx",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->changed
+ )
+);
+
+TRACE_EVENT(drv_link_info_changed,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ int link_id, u64 changed),
+
+ TP_ARGS(local, sdata, link_id, changed),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u64, changed)
+ __field(int, link_id)
__field(bool, cts)
__field(bool, shortpre)
__field(bool, shortslot)
@@ -424,64 +473,50 @@ TRACE_EVENT(drv_bss_info_changed,
__field(u32, channel_width)
__field(u32, channel_cfreq1)
__field(u32, channel_cfreq1_offset)
- __dynamic_array(u32, arp_addr_list,
- info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
- IEEE80211_BSS_ARP_ADDR_LIST_LEN :
- info->arp_addr_cnt)
- __field(int, arp_addr_cnt)
__field(bool, qos)
- __field(bool, idle)
__field(bool, ps)
- __dynamic_array(u8, ssid, info->ssid_len)
__field(bool, hidden_ssid)
__field(int, txpower)
__field(u8, p2p_oppps_ctwindow)
),
TP_fast_assign(
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
+
LOCAL_ASSIGN;
VIF_ASSIGN;
__entry->changed = changed;
- __entry->aid = info->aid;
- __entry->assoc = info->assoc;
- __entry->ibss_joined = info->ibss_joined;
- __entry->ibss_creator = info->ibss_creator;
- __entry->shortpre = info->use_short_preamble;
- __entry->cts = info->use_cts_prot;
- __entry->shortslot = info->use_short_slot;
- __entry->enable_beacon = info->enable_beacon;
- __entry->dtimper = info->dtim_period;
- __entry->bcnint = info->beacon_int;
- __entry->assoc_cap = info->assoc_capability;
- __entry->sync_tsf = info->sync_tsf;
- __entry->sync_device_ts = info->sync_device_ts;
- __entry->sync_dtim_count = info->sync_dtim_count;
- __entry->basic_rates = info->basic_rates;
- memcpy(__entry->mcast_rate, info->mcast_rate,
+ __entry->link_id = link_id;
+ __entry->shortpre = link_conf->use_short_preamble;
+ __entry->cts = link_conf->use_cts_prot;
+ __entry->shortslot = link_conf->use_short_slot;
+ __entry->enable_beacon = link_conf->enable_beacon;
+ __entry->dtimper = link_conf->dtim_period;
+ __entry->bcnint = link_conf->beacon_int;
+ __entry->assoc_cap = link_conf->assoc_capability;
+ __entry->sync_tsf = link_conf->sync_tsf;
+ __entry->sync_device_ts = link_conf->sync_device_ts;
+ __entry->sync_dtim_count = link_conf->sync_dtim_count;
+ __entry->basic_rates = link_conf->basic_rates;
+ memcpy(__entry->mcast_rate, link_conf->mcast_rate,
sizeof(__entry->mcast_rate));
- __entry->ht_operation_mode = info->ht_operation_mode;
- __entry->cqm_rssi_thold = info->cqm_rssi_thold;
- __entry->cqm_rssi_hyst = info->cqm_rssi_hyst;
- __entry->channel_width = info->chandef.width;
- __entry->channel_cfreq1 = info->chandef.center_freq1;
- __entry->channel_cfreq1_offset = info->chandef.freq1_offset;
- __entry->arp_addr_cnt = info->arp_addr_cnt;
- memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list,
- sizeof(u32) * (info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
- IEEE80211_BSS_ARP_ADDR_LIST_LEN :
- info->arp_addr_cnt));
- __entry->qos = info->qos;
- __entry->idle = info->idle;
- __entry->ps = info->ps;
- memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
- __entry->hidden_ssid = info->hidden_ssid;
- __entry->txpower = info->txpower;
- __entry->p2p_oppps_ctwindow = info->p2p_noa_attr.oppps_ctwindow;
+ __entry->ht_operation_mode = link_conf->ht_operation_mode;
+ __entry->cqm_rssi_thold = link_conf->cqm_rssi_thold;
+ __entry->cqm_rssi_hyst = link_conf->cqm_rssi_hyst;
+ __entry->channel_width = link_conf->chandef.width;
+ __entry->channel_cfreq1 = link_conf->chandef.center_freq1;
+ __entry->channel_cfreq1_offset = link_conf->chandef.freq1_offset;
+ __entry->qos = link_conf->qos;
+ __entry->ps = link_conf->ps;
+ __entry->hidden_ssid = link_conf->hidden_ssid;
+ __entry->txpower = link_conf->txpower;
+ __entry->p2p_oppps_ctwindow = link_conf->p2p_noa_attr.oppps_ctwindow;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " changed:%#x",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->changed
+ LOCAL_PR_FMT VIF_PR_FMT " link_id:%d, changed:%#llx",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id,
+ __entry->changed
)
);
@@ -1592,6 +1627,7 @@ struct trace_chandef_entry {
struct trace_switch_entry {
struct trace_vif_entry vif;
+ unsigned int link_id;
struct trace_chandef_entry old_chandef;
struct trace_chandef_entry new_chandef;
} __packed;
@@ -1631,6 +1667,7 @@ TRACE_EVENT(drv_switch_vif_chanctx,
SWITCH_ENTRY_ASSIGN(vif.vif_type, vif->type);
SWITCH_ENTRY_ASSIGN(vif.p2p, vif->p2p);
+ SWITCH_ENTRY_ASSIGN(link_id, link_id);
strncpy(local_vifs[i].vif.vif_name,
sdata->name,
sizeof(local_vifs[i].vif.vif_name));
@@ -1671,77 +1708,106 @@ TRACE_EVENT(drv_switch_vif_chanctx,
DECLARE_EVENT_CLASS(local_sdata_chanctx,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
struct ieee80211_chanctx *ctx),
- TP_ARGS(local, sdata, ctx),
+ TP_ARGS(local, sdata, link_id, ctx),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
CHANCTX_ENTRY
+ __field(unsigned int, link_id)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
CHANCTX_ASSIGN;
+ __entry->link_id = link_id;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT CHANCTX_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG, CHANCTX_PR_ARG
+ LOCAL_PR_FMT VIF_PR_FMT " link_id:%d" CHANCTX_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, CHANCTX_PR_ARG
)
);
DEFINE_EVENT(local_sdata_chanctx, drv_assign_vif_chanctx,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
struct ieee80211_chanctx *ctx),
- TP_ARGS(local, sdata, ctx)
+ TP_ARGS(local, sdata, link_id, ctx)
);
DEFINE_EVENT(local_sdata_chanctx, drv_unassign_vif_chanctx,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
struct ieee80211_chanctx *ctx),
- TP_ARGS(local, sdata, ctx)
+ TP_ARGS(local, sdata, link_id, ctx)
);
TRACE_EVENT(drv_start_ap,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- struct ieee80211_bss_conf *info),
+ struct ieee80211_bss_conf *info,
+ unsigned int link_id),
- TP_ARGS(local, sdata, info),
+ TP_ARGS(local, sdata, info, link_id),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
+ __field(u32, link_id)
__field(u8, dtimper)
__field(u16, bcnint)
- __dynamic_array(u8, ssid, info->ssid_len)
+ __dynamic_array(u8, ssid, sdata->vif.cfg.ssid_len)
__field(bool, hidden_ssid)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
+ __entry->link_id = link_id;
__entry->dtimper = info->dtim_period;
__entry->bcnint = info->beacon_int;
- memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
+ memcpy(__get_dynamic_array(ssid),
+ sdata->vif.cfg.ssid,
+ sdata->vif.cfg.ssid_len);
__entry->hidden_ssid = info->hidden_ssid;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG
+ LOCAL_PR_FMT VIF_PR_FMT " link id %u",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id
)
);
-DEFINE_EVENT(local_sdata_evt, drv_stop_ap,
+TRACE_EVENT(drv_stop_ap,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata)
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id),
+
+ TP_ARGS(local, sdata, link_id),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u32, link_id)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->link_id = link_id;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " link id %u",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id
+ )
);
TRACE_EVENT(drv_reconfig_complete,
@@ -1786,7 +1852,7 @@ TRACE_EVENT(drv_join_ibss,
VIF_ENTRY
__field(u8, dtimper)
__field(u16, bcnint)
- __dynamic_array(u8, ssid, info->ssid_len)
+ __dynamic_array(u8, ssid, sdata->vif.cfg.ssid_len)
),
TP_fast_assign(
@@ -1794,7 +1860,9 @@ TRACE_EVENT(drv_join_ibss,
VIF_ASSIGN;
__entry->dtimper = info->dtim_period;
__entry->bcnint = info->beacon_int;
- memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
+ memcpy(__get_dynamic_array(ssid),
+ sdata->vif.cfg.ssid,
+ sdata->vif.cfg.ssid_len);
),
TP_printk(
@@ -1972,933 +2040,991 @@ DEFINE_EVENT(local_sdata_evt, drv_abort_pmsr,
TP_ARGS(local, sdata)
);
-/*
- * Tracing for API calls that drivers call.
- */
-
-TRACE_EVENT(api_start_tx_ba_session,
- TP_PROTO(struct ieee80211_sta *sta, u16 tid),
+TRACE_EVENT(drv_set_default_unicast_key,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ int key_idx),
- TP_ARGS(sta, tid),
+ TP_ARGS(local, sdata, key_idx),
TP_STRUCT__entry(
- STA_ENTRY
- __field(u16, tid)
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(int, key_idx)
),
TP_fast_assign(
- STA_ASSIGN;
- __entry->tid = tid;
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->key_idx = key_idx;
),
- TP_printk(
- STA_PR_FMT " tid:%d",
- STA_PR_ARG, __entry->tid
- )
+ TP_printk(LOCAL_PR_FMT VIF_PR_FMT " key_idx:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->key_idx)
);
-TRACE_EVENT(api_start_tx_ba_cb,
- TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
+TRACE_EVENT(drv_channel_switch_beacon,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_chan_def *chandef),
- TP_ARGS(sdata, ra, tid),
+ TP_ARGS(local, sdata, chandef),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
- __array(u8, ra, ETH_ALEN)
- __field(u16, tid)
+ CHANDEF_ENTRY
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->ra, ra, ETH_ALEN);
- __entry->tid = tid;
+ CHANDEF_ASSIGN(chandef);
),
TP_printk(
- VIF_PR_FMT " ra:%pM tid:%d",
- VIF_PR_ARG, __entry->ra, __entry->tid
+ LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG
)
);
-TRACE_EVENT(api_stop_tx_ba_session,
- TP_PROTO(struct ieee80211_sta *sta, u16 tid),
+TRACE_EVENT(drv_pre_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch),
- TP_ARGS(sta, tid),
+ TP_ARGS(local, sdata, ch_switch),
TP_STRUCT__entry(
- STA_ENTRY
- __field(u16, tid)
+ LOCAL_ENTRY
+ VIF_ENTRY
+ CHANDEF_ENTRY
+ __field(u64, timestamp)
+ __field(u32, device_timestamp)
+ __field(bool, block_tx)
+ __field(u8, count)
),
TP_fast_assign(
- STA_ASSIGN;
- __entry->tid = tid;
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ CHANDEF_ASSIGN(&ch_switch->chandef)
+ __entry->timestamp = ch_switch->timestamp;
+ __entry->device_timestamp = ch_switch->device_timestamp;
+ __entry->block_tx = ch_switch->block_tx;
+ __entry->count = ch_switch->count;
),
TP_printk(
- STA_PR_FMT " tid:%d",
- STA_PR_ARG, __entry->tid
+ LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to "
+ CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
+ __entry->block_tx, __entry->timestamp
)
);
-TRACE_EVENT(api_stop_tx_ba_cb,
- TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
+DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
- TP_ARGS(sdata, ra, tid),
+DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
+TRACE_EVENT(drv_channel_switch_rx_beacon,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch),
+
+ TP_ARGS(local, sdata, ch_switch),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
- __array(u8, ra, ETH_ALEN)
- __field(u16, tid)
+ CHANDEF_ENTRY
+ __field(u64, timestamp)
+ __field(u32, device_timestamp)
+ __field(bool, block_tx)
+ __field(u8, count)
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->ra, ra, ETH_ALEN);
- __entry->tid = tid;
+ CHANDEF_ASSIGN(&ch_switch->chandef)
+ __entry->timestamp = ch_switch->timestamp;
+ __entry->device_timestamp = ch_switch->device_timestamp;
+ __entry->block_tx = ch_switch->block_tx;
+ __entry->count = ch_switch->count;
),
TP_printk(
- VIF_PR_FMT " ra:%pM tid:%d",
- VIF_PR_ARG, __entry->ra, __entry->tid
+ LOCAL_PR_FMT VIF_PR_FMT
+ " received a channel switch beacon to "
+ CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
+ __entry->block_tx, __entry->timestamp
)
);
-DEFINE_EVENT(local_only_evt, api_restart_hw,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
-);
-
-TRACE_EVENT(api_beacon_loss,
- TP_PROTO(struct ieee80211_sub_if_data *sdata),
+TRACE_EVENT(drv_get_txpower,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ int dbm, int ret),
- TP_ARGS(sdata),
+ TP_ARGS(local, sdata, dbm, ret),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
+ __field(int, dbm)
+ __field(int, ret)
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
+ __entry->dbm = dbm;
+ __entry->ret = ret;
),
TP_printk(
- VIF_PR_FMT,
- VIF_PR_ARG
+ LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret
)
);
-TRACE_EVENT(api_connection_loss,
- TP_PROTO(struct ieee80211_sub_if_data *sdata),
+TRACE_EVENT(drv_tdls_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef),
- TP_ARGS(sdata),
+ TP_ARGS(local, sdata, sta, oper_class, chandef),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
+ STA_ENTRY
+ __field(u8, oper_class)
+ CHANDEF_ENTRY
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->oper_class = oper_class;
+ CHANDEF_ASSIGN(chandef)
),
TP_printk(
- VIF_PR_FMT,
- VIF_PR_ARG
+ LOCAL_PR_FMT VIF_PR_FMT " tdls channel switch to"
+ CHANDEF_PR_FMT " oper_class:%d " STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->oper_class,
+ STA_PR_ARG
)
);
-TRACE_EVENT(api_disconnect,
- TP_PROTO(struct ieee80211_sub_if_data *sdata, bool reconnect),
+TRACE_EVENT(drv_tdls_cancel_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
- TP_ARGS(sdata, reconnect),
+ TP_ARGS(local, sdata, sta),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
- __field(int, reconnect)
+ STA_ENTRY
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->reconnect = reconnect;
+ STA_ASSIGN;
),
TP_printk(
- VIF_PR_FMT " reconnect:%d",
- VIF_PR_ARG, __entry->reconnect
+ LOCAL_PR_FMT VIF_PR_FMT
+ " tdls cancel channel switch with " STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
)
);
-TRACE_EVENT(api_cqm_rssi_notify,
- TP_PROTO(struct ieee80211_sub_if_data *sdata,
- enum nl80211_cqm_rssi_threshold_event rssi_event,
- s32 rssi_level),
+TRACE_EVENT(drv_tdls_recv_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_tdls_ch_sw_params *params),
- TP_ARGS(sdata, rssi_event, rssi_level),
+ TP_ARGS(local, sdata, params),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
- __field(u32, rssi_event)
- __field(s32, rssi_level)
+ __field(u8, action_code)
+ STA_ENTRY
+ CHANDEF_ENTRY
+ __field(u32, status)
+ __field(bool, peer_initiator)
+ __field(u32, timestamp)
+ __field(u16, switch_time)
+ __field(u16, switch_timeout)
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->rssi_event = rssi_event;
- __entry->rssi_level = rssi_level;
+ STA_NAMED_ASSIGN(params->sta);
+ CHANDEF_ASSIGN(params->chandef)
+ __entry->peer_initiator = params->sta->tdls_initiator;
+ __entry->action_code = params->action_code;
+ __entry->status = params->status;
+ __entry->timestamp = params->timestamp;
+ __entry->switch_time = params->switch_time;
+ __entry->switch_timeout = params->switch_timeout;
),
TP_printk(
- VIF_PR_FMT " event:%d rssi:%d",
- VIF_PR_ARG, __entry->rssi_event, __entry->rssi_level
+ LOCAL_PR_FMT VIF_PR_FMT " received tdls channel switch packet"
+ " action:%d status:%d time:%d switch time:%d switch"
+ " timeout:%d initiator: %d chan:" CHANDEF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->action_code, __entry->status,
+ __entry->timestamp, __entry->switch_time,
+ __entry->switch_timeout, __entry->peer_initiator,
+ CHANDEF_PR_ARG, STA_PR_ARG
)
);
-DEFINE_EVENT(local_sdata_evt, api_cqm_beacon_loss_notify,
+TRACE_EVENT(drv_wake_tx_queue,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata)
-);
-
-TRACE_EVENT(api_scan_completed,
- TP_PROTO(struct ieee80211_local *local, bool aborted),
+ struct ieee80211_sub_if_data *sdata,
+ struct txq_info *txq),
- TP_ARGS(local, aborted),
+ TP_ARGS(local, sdata, txq),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(bool, aborted)
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u8, ac)
+ __field(u8, tid)
),
TP_fast_assign(
+ struct ieee80211_sta *sta = txq->txq.sta;
+
LOCAL_ASSIGN;
- __entry->aborted = aborted;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->ac = txq->txq.ac;
+ __entry->tid = txq->txq.tid;
),
TP_printk(
- LOCAL_PR_FMT " aborted:%d",
- LOCAL_PR_ARG, __entry->aborted
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ac:%d tid:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ac, __entry->tid
)
);
-TRACE_EVENT(api_sched_scan_results,
- TP_PROTO(struct ieee80211_local *local),
+TRACE_EVENT(drv_get_ftm_responder_stats,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats),
- TP_ARGS(local),
+ TP_ARGS(local, sdata, ftm_stats),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
),
TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
+ LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG
)
);
-TRACE_EVENT(api_sched_scan_stopped,
- TP_PROTO(struct ieee80211_local *local),
+DEFINE_EVENT(local_sdata_addr_evt, drv_update_vif_offload,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
- TP_ARGS(local),
+DECLARE_EVENT_CLASS(sta_flag_evt,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, bool enabled),
+
+ TP_ARGS(local, sdata, sta, enabled),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(bool, enabled)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->enabled = enabled;
),
TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " enabled:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->enabled
)
);
-TRACE_EVENT(api_sta_block_awake,
+DEFINE_EVENT(sta_flag_evt, drv_sta_set_4addr,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta, bool block),
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, bool enabled),
- TP_ARGS(local, sta, block),
+ TP_ARGS(local, sdata, sta, enabled)
+);
+
+DEFINE_EVENT(sta_flag_evt, drv_sta_set_decap_offload,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, bool enabled),
+
+ TP_ARGS(local, sdata, sta, enabled)
+);
+
+TRACE_EVENT(drv_add_twt_setup,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt,
+ struct ieee80211_twt_params *twt_agrt),
+
+ TP_ARGS(local, sta, twt, twt_agrt),
TP_STRUCT__entry(
LOCAL_ENTRY
STA_ENTRY
- __field(bool, block)
+ __field(u8, dialog_token)
+ __field(u8, control)
+ __field(__le16, req_type)
+ __field(__le64, twt)
+ __field(u8, duration)
+ __field(__le16, mantissa)
+ __field(u8, channel)
),
TP_fast_assign(
LOCAL_ASSIGN;
STA_ASSIGN;
- __entry->block = block;
+ __entry->dialog_token = twt->dialog_token;
+ __entry->control = twt->control;
+ __entry->req_type = twt_agrt->req_type;
+ __entry->twt = twt_agrt->twt;
+ __entry->duration = twt_agrt->min_twt_dur;
+ __entry->mantissa = twt_agrt->mantissa;
+ __entry->channel = twt_agrt->channel;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " block:%d",
- LOCAL_PR_ARG, STA_PR_ARG, __entry->block
+ LOCAL_PR_FMT STA_PR_FMT
+ " token:%d control:0x%02x req_type:0x%04x"
+ " twt:%llu duration:%d mantissa:%d channel:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->dialog_token,
+ __entry->control, le16_to_cpu(__entry->req_type),
+ le64_to_cpu(__entry->twt), __entry->duration,
+ le16_to_cpu(__entry->mantissa), __entry->channel
)
);
-TRACE_EVENT(api_chswitch_done,
- TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success),
+TRACE_EVENT(drv_twt_teardown_request,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta, u8 flowid),
- TP_ARGS(sdata, success),
+ TP_ARGS(local, sta, flowid),
TP_STRUCT__entry(
- VIF_ENTRY
- __field(bool, success)
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(u8, flowid)
),
TP_fast_assign(
- VIF_ASSIGN;
- __entry->success = success;
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->flowid = flowid;
),
TP_printk(
- VIF_PR_FMT " success=%d",
- VIF_PR_ARG, __entry->success
+ LOCAL_PR_FMT STA_PR_FMT " flowid:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->flowid
)
);
-DEFINE_EVENT(local_only_evt, api_ready_on_channel,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
-);
-
-DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(sta_event, drv_net_fill_forward_path,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+ TP_ARGS(local, sdata, sta)
);
-TRACE_EVENT(api_gtk_rekey_notify,
- TP_PROTO(struct ieee80211_sub_if_data *sdata,
- const u8 *bssid, const u8 *replay_ctr),
+TRACE_EVENT(drv_change_vif_links,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u16 old_links, u16 new_links),
- TP_ARGS(sdata, bssid, replay_ctr),
+ TP_ARGS(local, sdata, old_links, new_links),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
- __array(u8, bssid, ETH_ALEN)
- __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN)
+ __field(u16, old_links)
+ __field(u16, new_links)
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->bssid, bssid, ETH_ALEN);
- memcpy(__entry->replay_ctr, replay_ctr, NL80211_REPLAY_CTR_LEN);
+ __entry->old_links = old_links;
+ __entry->new_links = new_links;
),
- TP_printk(VIF_PR_FMT, VIF_PR_ARG)
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " old_links:0x%04x, new_links:0x%04x\n",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->old_links, __entry->new_links
+ )
);
-TRACE_EVENT(api_enable_rssi_reports,
- TP_PROTO(struct ieee80211_sub_if_data *sdata,
- int rssi_min_thold, int rssi_max_thold),
+TRACE_EVENT(drv_change_sta_links,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links),
- TP_ARGS(sdata, rssi_min_thold, rssi_max_thold),
+ TP_ARGS(local, sdata, sta, old_links, new_links),
TP_STRUCT__entry(
+ LOCAL_ENTRY
VIF_ENTRY
- __field(int, rssi_min_thold)
- __field(int, rssi_max_thold)
+ STA_ENTRY
+ __field(u16, old_links)
+ __field(u16, new_links)
),
TP_fast_assign(
+ LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->rssi_min_thold = rssi_min_thold;
- __entry->rssi_max_thold = rssi_max_thold;
+ STA_ASSIGN;
+ __entry->old_links = old_links;
+ __entry->new_links = new_links;
),
TP_printk(
- VIF_PR_FMT " rssi_min_thold =%d, rssi_max_thold = %d",
- VIF_PR_ARG, __entry->rssi_min_thold, __entry->rssi_max_thold
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " old_links:0x%04x, new_links:0x%04x\n",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG,
+ __entry->old_links, __entry->new_links
)
);
-TRACE_EVENT(api_eosp,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta),
+/*
+ * Tracing for API calls that drivers call.
+ */
- TP_ARGS(local, sta),
+TRACE_EVENT(api_start_tx_ba_session,
+ TP_PROTO(struct ieee80211_sta *sta, u16 tid),
+
+ TP_ARGS(sta, tid),
TP_STRUCT__entry(
- LOCAL_ENTRY
STA_ENTRY
+ __field(u16, tid)
),
TP_fast_assign(
- LOCAL_ASSIGN;
STA_ASSIGN;
+ __entry->tid = tid;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT,
- LOCAL_PR_ARG, STA_PR_ARG
+ STA_PR_FMT " tid:%d",
+ STA_PR_ARG, __entry->tid
)
);
-TRACE_EVENT(api_send_eosp_nullfunc,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta,
- u8 tid),
+TRACE_EVENT(api_start_tx_ba_cb,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
- TP_ARGS(local, sta, tid),
+ TP_ARGS(sdata, ra, tid),
TP_STRUCT__entry(
- LOCAL_ENTRY
- STA_ENTRY
- __field(u8, tid)
+ VIF_ENTRY
+ __array(u8, ra, ETH_ALEN)
+ __field(u16, tid)
),
TP_fast_assign(
- LOCAL_ASSIGN;
- STA_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->ra, ra, ETH_ALEN);
__entry->tid = tid;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " tid:%d",
- LOCAL_PR_ARG, STA_PR_ARG, __entry->tid
+ VIF_PR_FMT " ra:%pM tid:%d",
+ VIF_PR_ARG, __entry->ra, __entry->tid
)
);
-TRACE_EVENT(api_sta_set_buffered,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta,
- u8 tid, bool buffered),
+TRACE_EVENT(api_stop_tx_ba_session,
+ TP_PROTO(struct ieee80211_sta *sta, u16 tid),
- TP_ARGS(local, sta, tid, buffered),
+ TP_ARGS(sta, tid),
TP_STRUCT__entry(
- LOCAL_ENTRY
STA_ENTRY
- __field(u8, tid)
- __field(bool, buffered)
+ __field(u16, tid)
),
TP_fast_assign(
- LOCAL_ASSIGN;
STA_ASSIGN;
__entry->tid = tid;
- __entry->buffered = buffered;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " tid:%d buffered:%d",
- LOCAL_PR_ARG, STA_PR_ARG, __entry->tid, __entry->buffered
+ STA_PR_FMT " tid:%d",
+ STA_PR_ARG, __entry->tid
)
);
-/*
- * Tracing for internal functions
- * (which may also be called in response to driver calls)
- */
-
-TRACE_EVENT(wake_queue,
- TP_PROTO(struct ieee80211_local *local, u16 queue,
- enum queue_stop_reason reason),
+TRACE_EVENT(api_stop_tx_ba_cb,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
- TP_ARGS(local, queue, reason),
+ TP_ARGS(sdata, ra, tid),
TP_STRUCT__entry(
- LOCAL_ENTRY
- __field(u16, queue)
- __field(u32, reason)
+ VIF_ENTRY
+ __array(u8, ra, ETH_ALEN)
+ __field(u16, tid)
),
TP_fast_assign(
- LOCAL_ASSIGN;
- __entry->queue = queue;
- __entry->reason = reason;
+ VIF_ASSIGN;
+ memcpy(__entry->ra, ra, ETH_ALEN);
+ __entry->tid = tid;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d, reason:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->reason
+ VIF_PR_FMT " ra:%pM tid:%d",
+ VIF_PR_ARG, __entry->ra, __entry->tid
)
);
-TRACE_EVENT(stop_queue,
- TP_PROTO(struct ieee80211_local *local, u16 queue,
- enum queue_stop_reason reason),
+DEFINE_EVENT(local_only_evt, api_restart_hw,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
- TP_ARGS(local, queue, reason),
+TRACE_EVENT(api_beacon_loss,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(sdata),
TP_STRUCT__entry(
- LOCAL_ENTRY
- __field(u16, queue)
- __field(u32, reason)
+ VIF_ENTRY
),
TP_fast_assign(
- LOCAL_ASSIGN;
- __entry->queue = queue;
- __entry->reason = reason;
+ VIF_ASSIGN;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d, reason:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->reason
+ VIF_PR_FMT,
+ VIF_PR_ARG
)
);
-TRACE_EVENT(drv_set_default_unicast_key,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- int key_idx),
+TRACE_EVENT(api_connection_loss,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata, key_idx),
+ TP_ARGS(sdata),
TP_STRUCT__entry(
- LOCAL_ENTRY
VIF_ENTRY
- __field(int, key_idx)
),
TP_fast_assign(
- LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->key_idx = key_idx;
),
- TP_printk(LOCAL_PR_FMT VIF_PR_FMT " key_idx:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->key_idx)
+ TP_printk(
+ VIF_PR_FMT,
+ VIF_PR_ARG
+ )
);
-TRACE_EVENT(api_radar_detected,
- TP_PROTO(struct ieee80211_local *local),
+TRACE_EVENT(api_disconnect,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, bool reconnect),
- TP_ARGS(local),
+ TP_ARGS(sdata, reconnect),
TP_STRUCT__entry(
- LOCAL_ENTRY
+ VIF_ENTRY
+ __field(int, reconnect)
),
TP_fast_assign(
- LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->reconnect = reconnect;
),
TP_printk(
- LOCAL_PR_FMT " radar detected",
- LOCAL_PR_ARG
+ VIF_PR_FMT " reconnect:%d",
+ VIF_PR_ARG, __entry->reconnect
)
);
-TRACE_EVENT(drv_channel_switch_beacon,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct cfg80211_chan_def *chandef),
+TRACE_EVENT(api_cqm_rssi_notify,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ s32 rssi_level),
- TP_ARGS(local, sdata, chandef),
+ TP_ARGS(sdata, rssi_event, rssi_level),
TP_STRUCT__entry(
- LOCAL_ENTRY
VIF_ENTRY
- CHANDEF_ENTRY
+ __field(u32, rssi_event)
+ __field(s32, rssi_level)
),
TP_fast_assign(
- LOCAL_ASSIGN;
VIF_ASSIGN;
- CHANDEF_ASSIGN(chandef);
+ __entry->rssi_event = rssi_event;
+ __entry->rssi_level = rssi_level;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG
+ VIF_PR_FMT " event:%d rssi:%d",
+ VIF_PR_ARG, __entry->rssi_event, __entry->rssi_level
)
);
-TRACE_EVENT(drv_pre_channel_switch,
+DEFINE_EVENT(local_sdata_evt, api_cqm_beacon_loss_notify,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel_switch *ch_switch),
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
- TP_ARGS(local, sdata, ch_switch),
+TRACE_EVENT(api_scan_completed,
+ TP_PROTO(struct ieee80211_local *local, bool aborted),
+
+ TP_ARGS(local, aborted),
TP_STRUCT__entry(
LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
+ __field(bool, aborted)
),
TP_fast_assign(
LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
+ __entry->aborted = aborted;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
+ LOCAL_PR_FMT " aborted:%d",
+ LOCAL_PR_ARG, __entry->aborted
)
);
-DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata)
-);
-
-DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata)
-);
-
-TRACE_EVENT(drv_channel_switch_rx_beacon,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel_switch *ch_switch),
+TRACE_EVENT(api_sched_scan_results,
+ TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local, sdata, ch_switch),
+ TP_ARGS(local),
TP_STRUCT__entry(
LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
),
TP_fast_assign(
LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT
- " received a channel switch beacon to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
+ LOCAL_PR_FMT, LOCAL_PR_ARG
)
);
-TRACE_EVENT(drv_get_txpower,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- int dbm, int ret),
+TRACE_EVENT(api_sched_scan_stopped,
+ TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local, sdata, dbm, ret),
+ TP_ARGS(local),
TP_STRUCT__entry(
LOCAL_ENTRY
- VIF_ENTRY
- __field(int, dbm)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- VIF_ASSIGN;
- __entry->dbm = dbm;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret
+ LOCAL_PR_FMT, LOCAL_PR_ARG
)
);
-TRACE_EVENT(drv_tdls_channel_switch,
+TRACE_EVENT(api_sta_block_awake,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, u8 oper_class,
- struct cfg80211_chan_def *chandef),
+ struct ieee80211_sta *sta, bool block),
- TP_ARGS(local, sdata, sta, oper_class, chandef),
+ TP_ARGS(local, sta, block),
TP_STRUCT__entry(
LOCAL_ENTRY
- VIF_ENTRY
STA_ENTRY
- __field(u8, oper_class)
- CHANDEF_ENTRY
+ __field(bool, block)
),
TP_fast_assign(
LOCAL_ASSIGN;
- VIF_ASSIGN;
STA_ASSIGN;
- __entry->oper_class = oper_class;
- CHANDEF_ASSIGN(chandef)
+ __entry->block = block;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " tdls channel switch to"
- CHANDEF_PR_FMT " oper_class:%d " STA_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->oper_class,
- STA_PR_ARG
+ LOCAL_PR_FMT STA_PR_FMT " block:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->block
)
);
-TRACE_EVENT(drv_tdls_cancel_channel_switch,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta),
+TRACE_EVENT(api_chswitch_done,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success),
- TP_ARGS(local, sdata, sta),
+ TP_ARGS(sdata, success),
TP_STRUCT__entry(
- LOCAL_ENTRY
VIF_ENTRY
- STA_ENTRY
+ __field(bool, success)
),
TP_fast_assign(
- LOCAL_ASSIGN;
VIF_ASSIGN;
- STA_ASSIGN;
+ __entry->success = success;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT
- " tdls cancel channel switch with " STA_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
+ VIF_PR_FMT " success=%d",
+ VIF_PR_ARG, __entry->success
)
);
-TRACE_EVENT(drv_tdls_recv_channel_switch,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_tdls_ch_sw_params *params),
+DEFINE_EVENT(local_only_evt, api_ready_on_channel,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
- TP_ARGS(local, sdata, params),
+DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
+
+TRACE_EVENT(api_gtk_rekey_notify,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const u8 *replay_ctr),
+
+ TP_ARGS(sdata, bssid, replay_ctr),
TP_STRUCT__entry(
- LOCAL_ENTRY
VIF_ENTRY
- __field(u8, action_code)
- STA_ENTRY
- CHANDEF_ENTRY
- __field(u32, status)
- __field(bool, peer_initiator)
- __field(u32, timestamp)
- __field(u16, switch_time)
- __field(u16, switch_timeout)
+ __array(u8, bssid, ETH_ALEN)
+ __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN)
),
TP_fast_assign(
- LOCAL_ASSIGN;
VIF_ASSIGN;
- STA_NAMED_ASSIGN(params->sta);
- CHANDEF_ASSIGN(params->chandef)
- __entry->peer_initiator = params->sta->tdls_initiator;
- __entry->action_code = params->action_code;
- __entry->status = params->status;
- __entry->timestamp = params->timestamp;
- __entry->switch_time = params->switch_time;
- __entry->switch_timeout = params->switch_timeout;
+ memcpy(__entry->bssid, bssid, ETH_ALEN);
+ memcpy(__entry->replay_ctr, replay_ctr, NL80211_REPLAY_CTR_LEN);
+ ),
+
+ TP_printk(VIF_PR_FMT, VIF_PR_ARG)
+);
+
+TRACE_EVENT(api_enable_rssi_reports,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata,
+ int rssi_min_thold, int rssi_max_thold),
+
+ TP_ARGS(sdata, rssi_min_thold, rssi_max_thold),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __field(int, rssi_min_thold)
+ __field(int, rssi_max_thold)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ __entry->rssi_min_thold = rssi_min_thold;
+ __entry->rssi_max_thold = rssi_max_thold;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " received tdls channel switch packet"
- " action:%d status:%d time:%d switch time:%d switch"
- " timeout:%d initiator: %d chan:" CHANDEF_PR_FMT STA_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->action_code, __entry->status,
- __entry->timestamp, __entry->switch_time,
- __entry->switch_timeout, __entry->peer_initiator,
- CHANDEF_PR_ARG, STA_PR_ARG
+ VIF_PR_FMT " rssi_min_thold =%d, rssi_max_thold = %d",
+ VIF_PR_ARG, __entry->rssi_min_thold, __entry->rssi_max_thold
)
);
-TRACE_EVENT(drv_wake_tx_queue,
+TRACE_EVENT(api_eosp,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct txq_info *txq),
+ struct ieee80211_sta *sta),
- TP_ARGS(local, sdata, txq),
+ TP_ARGS(local, sta),
TP_STRUCT__entry(
LOCAL_ENTRY
- VIF_ENTRY
STA_ENTRY
- __field(u8, ac)
- __field(u8, tid)
),
TP_fast_assign(
- struct ieee80211_sta *sta = txq->txq.sta;
-
LOCAL_ASSIGN;
- VIF_ASSIGN;
STA_ASSIGN;
- __entry->ac = txq->txq.ac;
- __entry->tid = txq->txq.tid;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ac:%d tid:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ac, __entry->tid
+ LOCAL_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, STA_PR_ARG
)
);
-TRACE_EVENT(drv_get_ftm_responder_stats,
+TRACE_EVENT(api_send_eosp_nullfunc,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct cfg80211_ftm_responder_stats *ftm_stats),
+ struct ieee80211_sta *sta,
+ u8 tid),
- TP_ARGS(local, sdata, ftm_stats),
+ TP_ARGS(local, sta, tid),
TP_STRUCT__entry(
LOCAL_ENTRY
- VIF_ENTRY
+ STA_ENTRY
+ __field(u8, tid)
),
TP_fast_assign(
LOCAL_ASSIGN;
- VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->tid = tid;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG
+ LOCAL_PR_FMT STA_PR_FMT " tid:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->tid
)
);
-DEFINE_EVENT(local_sdata_addr_evt, drv_update_vif_offload,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata)
-);
-
-DECLARE_EVENT_CLASS(sta_flag_evt,
+TRACE_EVENT(api_sta_set_buffered,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, bool enabled),
+ struct ieee80211_sta *sta,
+ u8 tid, bool buffered),
- TP_ARGS(local, sdata, sta, enabled),
+ TP_ARGS(local, sta, tid, buffered),
TP_STRUCT__entry(
LOCAL_ENTRY
- VIF_ENTRY
STA_ENTRY
- __field(bool, enabled)
+ __field(u8, tid)
+ __field(bool, buffered)
),
TP_fast_assign(
LOCAL_ASSIGN;
- VIF_ASSIGN;
STA_ASSIGN;
- __entry->enabled = enabled;
+ __entry->tid = tid;
+ __entry->buffered = buffered;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " enabled:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->enabled
+ LOCAL_PR_FMT STA_PR_FMT " tid:%d buffered:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->tid, __entry->buffered
)
);
-DEFINE_EVENT(sta_flag_evt, drv_sta_set_4addr,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, bool enabled),
+TRACE_EVENT(api_radar_detected,
+ TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local, sdata, sta, enabled)
-);
+ TP_ARGS(local),
-DEFINE_EVENT(sta_flag_evt, drv_sta_set_decap_offload,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, bool enabled),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
- TP_ARGS(local, sdata, sta, enabled)
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " radar detected",
+ LOCAL_PR_ARG
+ )
);
-TRACE_EVENT(drv_add_twt_setup,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta,
- struct ieee80211_twt_setup *twt,
- struct ieee80211_twt_params *twt_agrt),
+/*
+ * Tracing for internal functions
+ * (which may also be called in response to driver calls)
+ */
- TP_ARGS(local, sta, twt, twt_agrt),
+TRACE_EVENT(wake_queue,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ enum queue_stop_reason reason),
+
+ TP_ARGS(local, queue, reason),
TP_STRUCT__entry(
LOCAL_ENTRY
- STA_ENTRY
- __field(u8, dialog_token)
- __field(u8, control)
- __field(__le16, req_type)
- __field(__le64, twt)
- __field(u8, duration)
- __field(__le16, mantissa)
- __field(u8, channel)
+ __field(u16, queue)
+ __field(u32, reason)
),
TP_fast_assign(
LOCAL_ASSIGN;
- STA_ASSIGN;
- __entry->dialog_token = twt->dialog_token;
- __entry->control = twt->control;
- __entry->req_type = twt_agrt->req_type;
- __entry->twt = twt_agrt->twt;
- __entry->duration = twt_agrt->min_twt_dur;
- __entry->mantissa = twt_agrt->mantissa;
- __entry->channel = twt_agrt->channel;
+ __entry->queue = queue;
+ __entry->reason = reason;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT
- " token:%d control:0x%02x req_type:0x%04x"
- " twt:%llu duration:%d mantissa:%d channel:%d",
- LOCAL_PR_ARG, STA_PR_ARG, __entry->dialog_token,
- __entry->control, le16_to_cpu(__entry->req_type),
- le64_to_cpu(__entry->twt), __entry->duration,
- le16_to_cpu(__entry->mantissa), __entry->channel
+ LOCAL_PR_FMT " queue:%d, reason:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason
)
);
-TRACE_EVENT(drv_twt_teardown_request,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta, u8 flowid),
+TRACE_EVENT(stop_queue,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ enum queue_stop_reason reason),
- TP_ARGS(local, sta, flowid),
+ TP_ARGS(local, queue, reason),
TP_STRUCT__entry(
LOCAL_ENTRY
- STA_ENTRY
- __field(u8, flowid)
+ __field(u16, queue)
+ __field(u32, reason)
),
TP_fast_assign(
LOCAL_ASSIGN;
- STA_ASSIGN;
- __entry->flowid = flowid;
+ __entry->queue = queue;
+ __entry->reason = reason;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " flowid:%d",
- LOCAL_PR_ARG, STA_PR_ARG, __entry->flowid
+ LOCAL_PR_FMT " queue:%d, reason:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason
)
);
-DEFINE_EVENT(sta_event, drv_net_fill_forward_path,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta),
- TP_ARGS(local, sdata, sta)
-);
-
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
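The trace.h rework splits drv_bss_info_changed into drv_vif_cfg_changed (per-interface data read from vif.cfg, with a u64 changed mask) and drv_link_info_changed (per-link data read from vif.link_conf[link_id]). A rough sketch of how a caller might fan out to the two tracepoints; the wrapper name and the bit test used to choose a tracepoint are assumptions, only the tracepoint signatures come from the hunks above:

    static void example_trace_config_change(struct ieee80211_local *local,
                                            struct ieee80211_sub_if_data *sdata,
                                            int link_id, u64 changed)
    {
            /* interface-wide bits (e.g. association) are traced without a link */
            if (changed & BSS_CHANGED_ASSOC)
                    trace_drv_vif_cfg_changed(local, sdata, changed);
            else
                    trace_drv_link_info_changed(local, sdata, link_id, changed);
    }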
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c425f4fb7c2e..b58c85abcb1b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*
* Transmit and frame generation functions.
*/
@@ -18,7 +18,6 @@
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
-#include <linux/timekeeping.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
@@ -57,7 +56,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
return 0;
rcu_read_lock();
- chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(tx->sdata->vif.bss_conf.chanctx_conf);
if (chanctx_conf) {
shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
@@ -593,15 +592,15 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
(key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
tx->key = key;
else if (ieee80211_is_group_privacy_action(tx->skb) &&
- (key = rcu_dereference(tx->sdata->default_multicast_key)))
+ (key = rcu_dereference(tx->sdata->deflink.default_multicast_key)))
tx->key = key;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1) &&
ieee80211_is_robust_mgmt_frame(tx->skb) &&
- (key = rcu_dereference(tx->sdata->default_mgmt_key)))
+ (key = rcu_dereference(tx->sdata->deflink.default_mgmt_key)))
tx->key = key;
else if (is_multicast_ether_addr(hdr->addr1) &&
- (key = rcu_dereference(tx->sdata->default_multicast_key)))
+ (key = rcu_dereference(tx->sdata->deflink.default_multicast_key)))
tx->key = key;
else if (!is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(tx->sdata->default_unicast_key)))
@@ -882,7 +881,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
rem -= fraglen;
tmp = dev_alloc_skb(local->tx_headroom +
frag_threshold +
- tx->sdata->encrypt_headroom +
+ IEEE80211_ENCRYPT_HEADROOM +
IEEE80211_ENCRYPT_TAILROOM);
if (!tmp)
return -ENOMEM;
@@ -890,7 +889,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
__skb_queue_tail(&tx->skbs, tmp);
skb_reserve(tmp,
- local->tx_headroom + tx->sdata->encrypt_headroom);
+ local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
/* copy control information */
memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
@@ -1040,8 +1039,6 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
return ieee80211_crypto_gcmp_encrypt(tx);
- default:
- return ieee80211_crypto_hw_encrypt(tx);
}
return TX_DROP;
@@ -1481,7 +1478,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
codel_vars_init(&txqi->def_cvars);
codel_stats_init(&txqi->cstats);
__skb_queue_head_init(&txqi->frags);
- RB_CLEAR_NODE(&txqi->schedule_order);
+ INIT_LIST_HEAD(&txqi->schedule_order);
txqi->txq.vif = &sdata->vif;
@@ -1525,7 +1522,9 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
spin_unlock_bh(&fq->lock);
- ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
+ spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
+ list_del_init(&txqi->schedule_order);
+ spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}
void ieee80211_txq_set_params(struct ieee80211_local *local)
@@ -2013,7 +2012,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
headroom = local->tx_headroom;
if (encrypt != ENCRYPT_NO)
- headroom += sdata->encrypt_headroom;
+ headroom += IEEE80211_ENCRYPT_HEADROOM;
headroom -= skb_headroom(skb);
headroom = max_t(int, 0, headroom);
@@ -2347,12 +2346,12 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
}
}
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf) {
tmp_sdata = rcu_dereference(local->monitor_sdata);
if (tmp_sdata)
chanctx_conf =
- rcu_dereference(tmp_sdata->vif.chanctx_conf);
+ rcu_dereference(tmp_sdata->vif.bss_conf.chanctx_conf);
}
if (chanctx_conf)
@@ -2479,7 +2478,7 @@ int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
}
- sta = sta_info_get(sdata, sdata->u.mgd.bssid);
+ sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
if (!sta)
return -ENOLINK;
break;
@@ -2568,8 +2567,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
bool tdls_peer;
bool multicast;
u16 info_id = 0;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_sub_if_data *ap_sdata;
+ struct ieee80211_chanctx_conf *chanctx_conf = NULL;
enum nl80211_band band;
int ret;
@@ -2586,6 +2584,10 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
ethertype = (skb->data[12] << 8) | skb->data[13];
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+ if (!sdata->vif.valid_links)
+ chanctx_conf =
+ rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
if (sdata->wdev.use_4addr) {
@@ -2599,31 +2601,26 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
wme_sta = sta->sta.wme;
}
- ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
- u.ap);
- chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
- ret = -ENOTCONN;
- goto free;
+ if (!sdata->vif.valid_links) {
+ struct ieee80211_sub_if_data *ap_sdata;
+
+ /* override chanctx_conf from AP (we don't have one) */
+ ap_sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ chanctx_conf =
+ rcu_dereference(ap_sdata->vif.bss_conf.chanctx_conf);
}
- band = chanctx_conf->def.chan->band;
if (sdata->wdev.use_4addr)
break;
fallthrough;
case NL80211_IFTYPE_AP:
- if (sdata->vif.type == NL80211_IFTYPE_AP)
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
- ret = -ENOTCONN;
- goto free;
- }
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 24;
- band = chanctx_conf->def.chan->band;
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
@@ -2691,12 +2688,6 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
skb->data + ETH_ALEN);
}
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
- ret = -ENOTCONN;
- goto free;
- }
- band = chanctx_conf->def.chan->band;
/* For injected frames, fill RA right away as nexthop lookup
* will be skipped.
@@ -2714,14 +2705,14 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
- memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr.addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN);
hdrlen = 24;
} else if (sdata->u.mgd.use_4addr &&
cpu_to_be16(ethertype) != sdata->control_port_protocol) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
IEEE80211_FCTL_TODS);
/* RA TA DA SA */
- memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr.addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
@@ -2729,17 +2720,11 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
} else {
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
- memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr.addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
hdrlen = 24;
}
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
- ret = -ENOTCONN;
- goto free;
- }
- band = chanctx_conf->def.chan->band;
break;
case NL80211_IFTYPE_OCB:
/* DA SA BSSID */
@@ -2747,12 +2732,6 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
eth_broadcast_addr(hdr.addr3);
hdrlen = 24;
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
- ret = -ENOTCONN;
- goto free;
- }
- band = chanctx_conf->def.chan->band;
break;
case NL80211_IFTYPE_ADHOC:
/* DA SA BSSID */
@@ -2760,18 +2739,23 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
hdrlen = 24;
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
- ret = -ENOTCONN;
- goto free;
- }
- band = chanctx_conf->def.chan->band;
break;
default:
ret = -EINVAL;
goto free;
}
+ if (!chanctx_conf) {
+ if (!sdata->vif.valid_links) {
+ ret = -ENOTCONN;
+ goto free;
+ }
+ /* MLD transmissions must not rely on the band */
+ band = 0;
+ } else {
+ band = chanctx_conf->def.chan->band;
+ }
+
multicast = is_multicast_ether_addr(hdr.addr1);
/* sta is always NULL for mesh */
@@ -2858,7 +2842,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
*/
if (head_need > 0 || skb_cloned(skb)) {
- head_need += sdata->encrypt_headroom;
+ head_need += IEEE80211_ENCRYPT_HEADROOM;
head_need += local->tx_headroom;
head_need = max_t(int, 0, head_need);
if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
@@ -2897,7 +2881,9 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
info->flags = info_flags;
info->ack_frame_id = info_id;
info->band = band;
- info->control.flags = ctrl_flags;
+ info->control.flags = ctrl_flags |
+ u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
+ IEEE80211_TX_CTRL_MLO_LINK);
return skb;
free:
@@ -2973,14 +2959,20 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG))
goto out;
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
+ if (!sdata->vif.valid_links) {
+ rcu_read_lock();
+ chanctx_conf =
+ rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ goto out;
+ }
+ build.band = chanctx_conf->def.chan->band;
rcu_read_unlock();
- goto out;
+ } else {
+ /* MLD transmissions must not rely on the band */
+ build.band = 0;
}
- build.band = chanctx_conf->def.chan->band;
- rcu_read_unlock();
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
@@ -2997,7 +2989,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
/* DA SA BSSID */
build.da_offs = offsetof(struct ieee80211_hdr, addr1);
build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
- memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN);
build.hdr_len = 24;
break;
}
@@ -3007,7 +2999,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
IEEE80211_FCTL_TODS);
/* RA TA DA SA */
- memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
build.da_offs = offsetof(struct ieee80211_hdr, addr3);
build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
@@ -3016,7 +3008,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
}
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
- memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
build.da_offs = offsetof(struct ieee80211_hdr, addr3);
build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
build.hdr_len = 24;
@@ -3119,15 +3111,6 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
/* we don't know how to generate IVs for this at all */
if (WARN_ON(gen_iv))
goto out;
- /* pure hardware keys are OK, of course */
- if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
- break;
- /* cipher scheme might require space allocation */
- if (iv_spc &&
- build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
- goto out;
- if (iv_spc)
- build.hdr_len += build.key->conf.iv_len;
}
fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -3252,7 +3235,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
*/
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- bssid = sdata->u.mgd.bssid;
+ bssid = sdata->deflink.u.mgd.bssid;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
@@ -3563,7 +3546,9 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
IEEE80211_TX_CTL_DONTFRAG |
(tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
- info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
+ info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT |
+ u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
+ IEEE80211_TX_CTRL_MLO_LINK);
#ifdef CONFIG_MAC80211_DEBUGFS
if (local->force_tx_status)
@@ -3800,262 +3785,147 @@ out:
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
-struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
+static inline s32 ieee80211_sta_deficit(struct sta_info *sta, u8 ac)
{
- struct ieee80211_local *local = hw_to_local(hw);
- struct airtime_sched_info *air_sched;
- u64 now = ktime_get_coarse_boottime_ns();
- struct ieee80211_txq *ret = NULL;
- struct airtime_info *air_info;
- struct txq_info *txqi = NULL;
- struct rb_node *node;
- bool first = false;
-
- air_sched = &local->airtime[ac];
- spin_lock_bh(&air_sched->lock);
-
- node = air_sched->schedule_pos;
-
-begin:
- if (!node) {
- node = rb_first_cached(&air_sched->active_txqs);
- first = true;
- } else {
- node = rb_next(node);
- }
-
- if (!node)
- goto out;
-
- txqi = container_of(node, struct txq_info, schedule_order);
- air_info = to_airtime_info(&txqi->txq);
+ struct airtime_info *air_info = &sta->airtime[ac];
- if (air_info->v_t > air_sched->v_t &&
- (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
- goto out;
-
- if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
- first = false;
- goto begin;
- }
-
- air_sched->schedule_pos = node;
- air_sched->last_schedule_activity = now;
- ret = &txqi->txq;
-out:
- spin_unlock_bh(&air_sched->lock);
- return ret;
+ return air_info->deficit - atomic_read(&air_info->aql_tx_pending);
}
-EXPORT_SYMBOL(ieee80211_next_txq);
-static void __ieee80211_insert_txq(struct rb_root_cached *root,
- struct txq_info *txqi)
-{
- struct rb_node **new = &root->rb_root.rb_node;
- struct airtime_info *old_air, *new_air;
- struct rb_node *parent = NULL;
- struct txq_info *__txqi;
- bool leftmost = true;
-
- while (*new) {
- parent = *new;
- __txqi = rb_entry(parent, struct txq_info, schedule_order);
- old_air = to_airtime_info(&__txqi->txq);
- new_air = to_airtime_info(&txqi->txq);
-
- if (new_air->v_t <= old_air->v_t) {
- new = &parent->rb_left;
- } else {
- new = &parent->rb_right;
- leftmost = false;
- }
- }
-
- rb_link_node(&txqi->schedule_order, parent, new);
- rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
-}
-
-void ieee80211_resort_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
+static void
+ieee80211_txq_set_active(struct txq_info *txqi)
{
- struct airtime_info *air_info = to_airtime_info(txq);
- struct ieee80211_local *local = hw_to_local(hw);
- struct txq_info *txqi = to_txq_info(txq);
- struct airtime_sched_info *air_sched;
-
- air_sched = &local->airtime[txq->ac];
-
- lockdep_assert_held(&air_sched->lock);
-
- if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
- struct airtime_info *a_prev = NULL, *a_next = NULL;
- struct txq_info *t_prev, *t_next;
- struct rb_node *n_prev, *n_next;
-
- /* Erasing a node can cause an expensive rebalancing operation,
- * so we check the previous and next nodes first and only remove
- * and re-insert if the current node is not already in the
- * correct position.
- */
- if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
- t_prev = container_of(n_prev, struct txq_info,
- schedule_order);
- a_prev = to_airtime_info(&t_prev->txq);
- }
-
- if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
- t_next = container_of(n_next, struct txq_info,
- schedule_order);
- a_next = to_airtime_info(&t_next->txq);
- }
-
- if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
- (!a_next || a_next->v_t > air_info->v_t))
- return;
+ struct sta_info *sta;
- if (air_sched->schedule_pos == &txqi->schedule_order)
- air_sched->schedule_pos = n_prev;
+ if (!txqi->txq.sta)
+ return;
- rb_erase_cached(&txqi->schedule_order,
- &air_sched->active_txqs);
- RB_CLEAR_NODE(&txqi->schedule_order);
- __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
- }
+ sta = container_of(txqi->txq.sta, struct sta_info, sta);
+ sta->airtime[txqi->txq.ac].last_active = (u32)jiffies;
}
-void ieee80211_update_airtime_weight(struct ieee80211_local *local,
- struct airtime_sched_info *air_sched,
- u64 now, bool force)
+static bool
+ieee80211_txq_keep_active(struct txq_info *txqi)
{
- struct airtime_info *air_info, *tmp;
- u64 weight_sum = 0;
+ struct sta_info *sta;
+ u32 diff;
- if (unlikely(!now))
- now = ktime_get_coarse_boottime_ns();
+ if (!txqi->txq.sta)
+ return false;
- lockdep_assert_held(&air_sched->lock);
+ sta = container_of(txqi->txq.sta, struct sta_info, sta);
+ if (ieee80211_sta_deficit(sta, txqi->txq.ac) >= 0)
+ return false;
- if (!force && (air_sched->last_weight_update <
- now - AIRTIME_ACTIVE_DURATION))
- return;
+ diff = (u32)jiffies - sta->airtime[txqi->txq.ac].last_active;
- list_for_each_entry_safe(air_info, tmp,
- &air_sched->active_list, list) {
- if (airtime_is_active(air_info, now))
- weight_sum += air_info->weight;
- else
- list_del_init(&air_info->list);
- }
- airtime_weight_sum_set(air_sched, weight_sum);
- air_sched->last_weight_update = now;
+ return diff <= AIRTIME_ACTIVE_DURATION;
}
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
- __acquires(txq_lock) __releases(txq_lock)
+struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct txq_info *txqi = to_txq_info(txq);
- struct airtime_sched_info *air_sched;
- u64 now = ktime_get_coarse_boottime_ns();
- struct airtime_info *air_info;
- u8 ac = txq->ac;
- bool was_active;
-
- air_sched = &local->airtime[ac];
- air_info = to_airtime_info(txq);
+ struct ieee80211_txq *ret = NULL;
+ struct txq_info *txqi = NULL, *head = NULL;
+ bool found_eligible_txq = false;
- spin_lock_bh(&air_sched->lock);
- was_active = airtime_is_active(air_info, now);
- airtime_set_active(air_sched, air_info, now);
+ spin_lock_bh(&local->active_txq_lock[ac]);
- if (!RB_EMPTY_NODE(&txqi->schedule_order))
+ if (!local->schedule_round[ac])
goto out;
- /* If the station has been inactive for a while, catch up its v_t so it
- * doesn't get indefinite priority; see comment above the definition of
- * AIRTIME_MAX_BEHIND.
- */
- if ((!was_active && air_info->v_t < air_sched->v_t) ||
- air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
- air_info->v_t = air_sched->v_t;
+ begin:
+ txqi = list_first_entry_or_null(&local->active_txqs[ac],
+ struct txq_info,
+ schedule_order);
+ if (!txqi)
+ goto out;
- ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
- __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
+ if (txqi == head) {
+ if (!found_eligible_txq)
+ goto out;
+ else
+ found_eligible_txq = false;
+ }
-out:
- spin_unlock_bh(&air_sched->lock);
-}
-EXPORT_SYMBOL(ieee80211_schedule_txq);
+ if (!head)
+ head = txqi;
-static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq,
- bool purge)
-{
- struct ieee80211_local *local = hw_to_local(hw);
- struct txq_info *txqi = to_txq_info(txq);
- struct airtime_sched_info *air_sched;
- struct airtime_info *air_info;
+ if (txqi->txq.sta) {
+ struct sta_info *sta = container_of(txqi->txq.sta,
+ struct sta_info, sta);
+ bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
+ s32 deficit = ieee80211_sta_deficit(sta, txqi->txq.ac);
- air_sched = &local->airtime[txq->ac];
- air_info = to_airtime_info(&txqi->txq);
+ if (aql_check)
+ found_eligible_txq = true;
- lockdep_assert_held(&air_sched->lock);
+ if (deficit < 0)
+ sta->airtime[txqi->txq.ac].deficit +=
+ sta->airtime_weight;
- if (purge) {
- list_del_init(&air_info->list);
- ieee80211_update_airtime_weight(local, air_sched, 0, true);
+ if (deficit < 0 || !aql_check) {
+ list_move_tail(&txqi->schedule_order,
+ &local->active_txqs[txqi->txq.ac]);
+ goto begin;
+ }
}
- if (RB_EMPTY_NODE(&txqi->schedule_order))
- return;
-
- if (air_sched->schedule_pos == &txqi->schedule_order)
- air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
+ if (txqi->schedule_round == local->schedule_round[ac])
+ goto out;
- if (!purge)
- airtime_set_active(air_sched, air_info,
- ktime_get_coarse_boottime_ns());
+ list_del_init(&txqi->schedule_order);
+ txqi->schedule_round = local->schedule_round[ac];
+ ret = &txqi->txq;
- rb_erase_cached(&txqi->schedule_order,
- &air_sched->active_txqs);
- RB_CLEAR_NODE(&txqi->schedule_order);
+out:
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+ return ret;
}
+EXPORT_SYMBOL(ieee80211_next_txq);
-void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
struct ieee80211_txq *txq,
- bool purge)
- __acquires(txq_lock) __releases(txq_lock)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
- spin_lock_bh(&local->airtime[txq->ac].lock);
- __ieee80211_unschedule_txq(hw, txq, purge);
- spin_unlock_bh(&local->airtime[txq->ac].lock);
-}
-
-void ieee80211_return_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq, bool force)
+ bool force)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
+ bool has_queue;
+
+ spin_lock_bh(&local->active_txq_lock[txq->ac]);
+
+ has_queue = force || txq_has_queue(txq);
+ if (list_empty(&txqi->schedule_order) &&
+ (has_queue || ieee80211_txq_keep_active(txqi))) {
+ /* If airtime accounting is active, always enqueue STAs at the
+ * head of the list to ensure that they only get moved to the
+ * back by the airtime DRR scheduler once they have a negative
+ * deficit. A station that already has a negative deficit will
+ * get immediately moved to the back of the list on the next
+ * call to ieee80211_next_txq().
+ */
+ if (txqi->txq.sta && local->airtime_flags && has_queue &&
+ wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ list_add(&txqi->schedule_order,
+ &local->active_txqs[txq->ac]);
+ else
+ list_add_tail(&txqi->schedule_order,
+ &local->active_txqs[txq->ac]);
+ if (has_queue)
+ ieee80211_txq_set_active(txqi);
+ }
- spin_lock_bh(&local->airtime[txq->ac].lock);
-
- if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
- !txq_has_queue(txq))
- __ieee80211_unschedule_txq(hw, txq, false);
-
- spin_unlock_bh(&local->airtime[txq->ac].lock);
+ spin_unlock_bh(&local->active_txq_lock[txq->ac]);
}
-EXPORT_SYMBOL(ieee80211_return_txq);
+EXPORT_SYMBOL(__ieee80211_schedule_txq);
DEFINE_STATIC_KEY_FALSE(aql_disable);
bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
- struct airtime_info *air_info = to_airtime_info(txq);
+ struct sta_info *sta;
struct ieee80211_local *local = hw_to_local(hw);
if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
@@ -4070,74 +3940,108 @@ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
return true;
- if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
+ sta = container_of(txq->sta, struct sta_info, sta);
+ if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
+ sta->airtime[txq->ac].aql_limit_low)
return true;
if (atomic_read(&local->aql_total_pending_airtime) <
local->aql_threshold &&
- atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
+ atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
+ sta->airtime[txq->ac].aql_limit_high)
return true;
return false;
}
EXPORT_SYMBOL(ieee80211_txq_airtime_check);
+static bool
+ieee80211_txq_schedule_airtime_check(struct ieee80211_local *local, u8 ac)
+{
+ unsigned int num_txq = 0;
+ struct txq_info *txq;
+ u32 aql_limit;
+
+ if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
+ return true;
+
+ list_for_each_entry(txq, &local->active_txqs[ac], schedule_order)
+ num_txq++;
+
+ aql_limit = (num_txq - 1) * local->aql_txq_limit_low[ac] / 2 +
+ local->aql_txq_limit_high[ac];
+
+ return atomic_read(&local->aql_ac_pending_airtime[ac]) < aql_limit;
+}
+
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
- struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
struct ieee80211_local *local = hw_to_local(hw);
- struct airtime_sched_info *air_sched;
- struct airtime_info *air_info;
- struct rb_node *node = NULL;
- bool ret = false;
- u64 now;
-
+ struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
+ struct sta_info *sta;
+ u8 ac = txq->ac;
- if (!ieee80211_txq_airtime_check(hw, txq))
- return false;
+ spin_lock_bh(&local->active_txq_lock[ac]);
- air_sched = &local->airtime[txq->ac];
- spin_lock_bh(&air_sched->lock);
+ if (!txqi->txq.sta)
+ goto out;
- if (RB_EMPTY_NODE(&txqi->schedule_order))
+ if (list_empty(&txqi->schedule_order))
goto out;
- now = ktime_get_coarse_boottime_ns();
+ if (!ieee80211_txq_schedule_airtime_check(local, ac))
+ goto out;
- /* Like in ieee80211_next_txq(), make sure the first station in the
- * scheduling order is eligible for transmission to avoid starvation.
- */
- node = rb_first_cached(&air_sched->active_txqs);
- if (node) {
- first_txqi = container_of(node, struct txq_info,
- schedule_order);
- air_info = to_airtime_info(&first_txqi->txq);
+ list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
+ schedule_order) {
+ if (iter == txqi)
+ break;
- if (air_sched->v_t < air_info->v_t)
- airtime_catchup_v_t(air_sched, air_info->v_t, now);
+ if (!iter->txq.sta) {
+ list_move_tail(&iter->schedule_order,
+ &local->active_txqs[ac]);
+ continue;
+ }
+ sta = container_of(iter->txq.sta, struct sta_info, sta);
+ if (ieee80211_sta_deficit(sta, ac) < 0)
+ sta->airtime[ac].deficit += sta->airtime_weight;
+ list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
}
- air_info = to_airtime_info(&txqi->txq);
- if (air_info->v_t <= air_sched->v_t) {
- air_sched->last_schedule_activity = now;
- ret = true;
- }
+ sta = container_of(txqi->txq.sta, struct sta_info, sta);
+ if (sta->airtime[ac].deficit >= 0)
+ goto out;
+
+ sta->airtime[ac].deficit += sta->airtime_weight;
+ list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+ return false;
out:
- spin_unlock_bh(&air_sched->lock);
- return ret;
+ if (!list_empty(&txqi->schedule_order))
+ list_del_init(&txqi->schedule_order);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+
+ return true;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct airtime_sched_info *air_sched = &local->airtime[ac];
- spin_lock_bh(&air_sched->lock);
- air_sched->schedule_pos = NULL;
- spin_unlock_bh(&air_sched->lock);
+ spin_lock_bh(&local->active_txq_lock[ac]);
+
+ if (ieee80211_txq_schedule_airtime_check(local, ac)) {
+ local->schedule_round[ac]++;
+ if (!local->schedule_round[ac])
+ local->schedule_round[ac]++;
+ } else {
+ local->schedule_round[ac] = 0;
+ }
+
+ spin_unlock_bh(&local->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
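
The scheduling hunks above drop the rbtree/virtual-time airtime scheduler (ieee80211_resort_txq, ieee80211_update_airtime_weight and the schedule/unschedule helpers built on airtime_sched_info) and restore a list-based deficit round robin: active TXQs live on a per-AC list, each station carries an airtime deficit that ieee80211_next_txq() tops up by the station's airtime weight when it goes negative, and only a TXQ with a non-negative deficit that also passes the AQL check is handed to the driver. A minimal standalone sketch of that idea, using illustrative names and a plain singly linked list instead of the kernel's list and locking machinery:

struct model_txq {
	struct model_txq *next;
	int backlog;	/* queued frames */
	int deficit;	/* remaining airtime credit */
	int weight;	/* airtime weight added back on replenish */
};

static struct model_txq *model_next_txq(struct model_txq *head)
{
	struct model_txq *t;

	for (t = head; t; t = t->next) {
		if (t->backlog && t->deficit >= 0)
			return t;	/* eligible: schedule this TXQ */
		if (t->deficit < 0)
			t->deficit += t->weight;	/* top up; the real code
							 * also rotates it to the
							 * list tail */
	}
	return NULL;	/* nothing eligible in this round */
}
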
@@ -4605,12 +4509,16 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
sdata = vif_to_sdata(info->control.vif);
if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (unlikely(!chanctx_conf)) {
- dev_kfree_skb(skb);
- return true;
+ /* update band only for non-MLD */
+ if (!sdata->vif.valid_links) {
+ chanctx_conf =
+ rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+ if (unlikely(!chanctx_conf)) {
+ dev_kfree_skb(skb);
+ return true;
+ }
+ info->band = chanctx_conf->def.chan->band;
}
- info->band = chanctx_conf->def.chan->band;
result = ieee80211_tx(sdata, NULL, skb, true);
} else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
@@ -4691,11 +4599,12 @@ void ieee80211_tx_pending(struct tasklet_struct *t)
static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
struct ps_data *ps, struct sk_buff *skb,
- bool is_template)
+ bool is_template, unsigned int link_id)
{
u8 *pos, *tim;
int aid0 = 0;
int i, have_bits = 0, n1, n2;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
/* Generate bitmap for TIM only if there are any STAs in power save
* mode. */
@@ -4706,7 +4615,7 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
IEEE80211_MAX_AID+1);
if (!is_template) {
if (ps->dtim_count == 0)
- ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
+ ps->dtim_count = link_conf->dtim_period - 1;
else
ps->dtim_count--;
}
@@ -4715,7 +4624,7 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
*pos++ = WLAN_EID_TIM;
*pos++ = 4;
*pos++ = ps->dtim_count;
- *pos++ = sdata->vif.bss_conf.dtim_period;
+ *pos++ = link_conf->dtim_period;
if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
aid0 = 1;
@@ -4756,7 +4665,7 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
struct ps_data *ps, struct sk_buff *skb,
- bool is_template)
+ bool is_template, unsigned int link_id)
{
struct ieee80211_local *local = sdata->local;
@@ -4768,10 +4677,12 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
* of the tim bitmap in mac80211 and the driver.
*/
if (local->tim_in_locked_section) {
- __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
+ __ieee80211_beacon_add_tim(sdata, ps, skb, is_template,
+ link_id);
} else {
spin_lock_bh(&local->tim_lock);
- __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
+ __ieee80211_beacon_add_tim(sdata, ps, skb, is_template,
+ link_id);
spin_unlock_bh(&local->tim_lock);
}
@@ -4779,7 +4690,8 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
}
static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
- struct beacon_data *beacon)
+ struct beacon_data *beacon,
+ unsigned int link_id)
{
u8 *beacon_data, count, max_count = 1;
struct probe_resp *resp;
@@ -4805,11 +4717,11 @@ static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
}
rcu_read_lock();
- resp = rcu_dereference(sdata->u.ap.probe_resp);
+ resp = rcu_dereference(sdata->link[link_id]->u.ap.probe_resp);
bcn_offsets = beacon->cntdwn_counter_offsets;
count = beacon->cntdwn_current_counter;
- if (sdata->vif.csa_active)
+ if (sdata->vif.link_conf[link_id]->csa_active)
max_count = IEEE80211_MAX_CNTDWN_COUNTERS_NUM;
for (i = 0; i < max_count; ++i) {
@@ -4849,7 +4761,7 @@ u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
rcu_read_lock();
if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->u.ap.beacon);
+ beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
@@ -4874,7 +4786,7 @@ void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter)
rcu_read_lock();
if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->u.ap.beacon);
+ beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
@@ -4904,9 +4816,7 @@ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
rcu_read_lock();
if (vif->type == NL80211_IFTYPE_AP) {
- struct ieee80211_if_ap *ap = &sdata->u.ap;
-
- beacon = rcu_dereference(ap->beacon);
+ beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
if (WARN_ON(!beacon || !beacon->tail))
goto out;
beacon_data = beacon->tail;
@@ -4952,14 +4862,15 @@ EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete);
static int ieee80211_beacon_protect(struct sk_buff *skb,
struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id)
{
ieee80211_tx_result res;
struct ieee80211_tx_data tx;
struct sk_buff *check_skb;
memset(&tx, 0, sizeof(tx));
- tx.key = rcu_dereference(sdata->default_beacon_key);
+ tx.key = rcu_dereference(sdata->link[link_id]->default_beacon_key);
if (!tx.key)
return 0;
tx.local = local;
@@ -4983,7 +4894,8 @@ ieee80211_beacon_get_finish(struct ieee80211_hw *hw,
struct beacon_data *beacon,
struct sk_buff *skb,
struct ieee80211_chanctx_conf *chanctx_conf,
- u16 csa_off_base)
+ u16 csa_off_base,
+ unsigned int link_id)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -5014,7 +4926,7 @@ ieee80211_beacon_get_finish(struct ieee80211_hw *hw,
memset(&txrc, 0, sizeof(txrc));
txrc.hw = hw;
txrc.sband = local->hw.wiphy->bands[band];
- txrc.bss_conf = &sdata->vif.bss_conf;
+ txrc.bss_conf = sdata->vif.link_conf[link_id];
txrc.skb = skb;
txrc.reported_rate.idx = -1;
if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
@@ -5049,7 +4961,8 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
struct ieee80211_mutable_offsets *offs,
bool is_template,
struct beacon_data *beacon,
- struct ieee80211_chanctx_conf *chanctx_conf)
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ unsigned int link_id)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -5062,7 +4975,7 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (!is_template)
ieee80211_beacon_update_cntdwn(vif);
- ieee80211_set_beacon_cntdwn(sdata, beacon);
+ ieee80211_set_beacon_cntdwn(sdata, beacon, link_id);
}
/* headroom, head length,
@@ -5078,7 +4991,7 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
- ieee80211_beacon_add_tim(sdata, &ap->ps, skb, is_template);
+ ieee80211_beacon_add_tim(sdata, &ap->ps, skb, is_template, link_id);
if (offs) {
offs->tim_offset = beacon->head_len;
@@ -5097,11 +5010,11 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (beacon->tail)
skb_put_data(skb, beacon->tail, beacon->tail_len);
- if (ieee80211_beacon_protect(skb, local, sdata) < 0)
+ if (ieee80211_beacon_protect(skb, local, sdata, link_id) < 0)
return NULL;
ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb, chanctx_conf,
- csa_off_base);
+ csa_off_base, link_id);
return skb;
}
@@ -5109,7 +5022,8 @@ static struct sk_buff *
__ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_mutable_offsets *offs,
- bool is_template)
+ bool is_template,
+ unsigned int link_id)
{
struct ieee80211_local *local = hw_to_local(hw);
struct beacon_data *beacon = NULL;
@@ -5120,7 +5034,8 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
rcu_read_lock();
sdata = vif_to_sdata(vif);
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf =
+ rcu_dereference(sdata->vif.link_conf[link_id]->chanctx_conf);
if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
goto out;
@@ -5129,14 +5044,12 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
memset(offs, 0, sizeof(*offs));
if (sdata->vif.type == NL80211_IFTYPE_AP) {
- struct ieee80211_if_ap *ap = &sdata->u.ap;
-
- beacon = rcu_dereference(ap->beacon);
+ beacon = rcu_dereference(sdata->link[link_id]->u.ap.beacon);
if (!beacon)
goto out;
skb = ieee80211_beacon_get_ap(hw, vif, offs, is_template,
- beacon, chanctx_conf);
+ beacon, chanctx_conf, link_id);
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_hdr *hdr;
@@ -5149,7 +5062,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (!is_template)
__ieee80211_beacon_update_cntdwn(beacon);
- ieee80211_set_beacon_cntdwn(sdata, beacon);
+ ieee80211_set_beacon_cntdwn(sdata, beacon, link_id);
}
skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
@@ -5164,7 +5077,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
IEEE80211_STYPE_BEACON);
ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb,
- chanctx_conf, 0);
+ chanctx_conf, 0, link_id);
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
@@ -5181,7 +5094,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
*/
__ieee80211_beacon_update_cntdwn(beacon);
- ieee80211_set_beacon_cntdwn(sdata, beacon);
+ ieee80211_set_beacon_cntdwn(sdata, beacon, link_id);
}
if (ifmsh->sync_ops)
@@ -5196,7 +5109,8 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
- ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
+ ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template,
+ link_id);
if (offs) {
offs->tim_offset = beacon->head_len;
@@ -5205,7 +5119,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
skb_put_data(skb, beacon->tail, beacon->tail_len);
ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb,
- chanctx_conf, 0);
+ chanctx_conf, 0, link_id);
} else {
WARN_ON(1);
goto out;
@@ -5220,20 +5134,22 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
struct sk_buff *
ieee80211_beacon_get_template(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_mutable_offsets *offs)
+ struct ieee80211_mutable_offsets *offs,
+ unsigned int link_id)
{
- return __ieee80211_beacon_get(hw, vif, offs, true);
+ return __ieee80211_beacon_get(hw, vif, offs, true, link_id);
}
EXPORT_SYMBOL(ieee80211_beacon_get_template);
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 *tim_offset, u16 *tim_length)
+ u16 *tim_offset, u16 *tim_length,
+ unsigned int link_id)
{
struct ieee80211_mutable_offsets offs = {};
- struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
+ struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false,
+ link_id);
struct sk_buff *copy;
- struct ieee80211_supported_band *sband;
int shift;
if (!bcn)
@@ -5255,12 +5171,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
return bcn;
shift = ieee80211_vif_get_shift(vif);
- sband = ieee80211_get_sband(vif_to_sdata(vif));
- if (!sband)
- return bcn;
-
- ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false,
- NULL);
+ ieee80211_tx_monitor(hw_to_local(hw), copy, 1, shift, false, NULL);
return bcn;
}
@@ -5269,7 +5180,6 @@ EXPORT_SYMBOL(ieee80211_beacon_get_tim);
struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ieee80211_if_ap *ap = NULL;
struct sk_buff *skb = NULL;
struct probe_resp *presp = NULL;
struct ieee80211_hdr *hdr;
@@ -5279,9 +5189,7 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
return NULL;
rcu_read_lock();
-
- ap = &sdata->u.ap;
- presp = rcu_dereference(ap->probe_resp);
+ presp = rcu_dereference(sdata->deflink.u.ap.probe_resp);
if (!presp)
goto out;
@@ -5311,7 +5219,7 @@ struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
return NULL;
rcu_read_lock();
- tmpl = rcu_dereference(sdata->u.ap.fils_discovery);
+ tmpl = rcu_dereference(sdata->deflink.u.ap.fils_discovery);
if (!tmpl) {
rcu_read_unlock();
return NULL;
@@ -5340,7 +5248,7 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
return NULL;
rcu_read_lock();
- tmpl = rcu_dereference(sdata->u.ap.unsol_bcast_probe_resp);
+ tmpl = rcu_dereference(sdata->deflink.u.ap.unsol_bcast_probe_resp);
if (!tmpl) {
rcu_read_unlock();
return NULL;
@@ -5361,7 +5269,6 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_if_managed *ifmgd;
struct ieee80211_pspoll *pspoll;
struct ieee80211_local *local;
struct sk_buff *skb;
@@ -5370,7 +5277,6 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
return NULL;
sdata = vif_to_sdata(vif);
- ifmgd = &sdata->u.mgd;
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
@@ -5382,12 +5288,12 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
pspoll = skb_put_zero(skb, sizeof(*pspoll));
pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_PSPOLL);
- pspoll->aid = cpu_to_le16(sdata->vif.bss_conf.aid);
+ pspoll->aid = cpu_to_le16(sdata->vif.cfg.aid);
/* aid in PS-Poll has its two MSBs each set to 1 */
pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
- memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
+ memcpy(pspoll->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(pspoll->ta, vif->addr, ETH_ALEN);
return skb;
@@ -5400,7 +5306,6 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
{
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_if_managed *ifmgd;
struct ieee80211_local *local;
struct sk_buff *skb;
bool qos = false;
@@ -5409,14 +5314,13 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
return NULL;
sdata = vif_to_sdata(vif);
- ifmgd = &sdata->u.mgd;
local = sdata->local;
if (qos_ok) {
struct sta_info *sta;
rcu_read_lock();
- sta = sta_info_get(sdata, ifmgd->bssid);
+ sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
qos = sta && sta->sta.wme;
rcu_read_unlock();
}
@@ -5445,9 +5349,9 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
skb_put_data(skb, &qoshdr, sizeof(qoshdr));
}
- memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
+ memcpy(nullfunc->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
- memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN);
return skb;
}
@@ -5537,14 +5441,14 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
sdata = vif_to_sdata(vif);
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (!chanctx_conf)
goto out;
if (sdata->vif.type == NL80211_IFTYPE_AP) {
struct beacon_data *beacon =
- rcu_dereference(sdata->u.ap.beacon);
+ rcu_dereference(sdata->deflink.u.ap.beacon);
if (!beacon || !beacon->head)
goto out;
@@ -5691,7 +5595,9 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
enum nl80211_band band)
{
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
int ac = ieee80211_ac_from_tid(tid);
+ unsigned int link;
skb_reset_mac_header(skb);
skb_set_queue_mapping(skb, ac);
@@ -5699,6 +5605,30 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
skb->dev = sdata->dev;
+ BUILD_BUG_ON(IEEE80211_LINK_UNSPECIFIED < IEEE80211_MLD_MAX_NUM_LINKS);
+ BUILD_BUG_ON(!FIELD_FIT(IEEE80211_TX_CTRL_MLO_LINK,
+ IEEE80211_LINK_UNSPECIFIED));
+
+ if (!sdata->vif.valid_links) {
+ link = 0;
+ } else if (memcmp(sdata->vif.addr, hdr->addr2, ETH_ALEN) == 0) {
+ /* address from the MLD */
+ link = IEEE80211_LINK_UNSPECIFIED;
+ } else {
+ /* otherwise must be addressed from a link */
+ for (link = 0; link < ARRAY_SIZE(sdata->vif.link_conf); link++) {
+ if (memcmp(sdata->vif.link_conf[link]->addr,
+ hdr->addr2, ETH_ALEN) == 0)
+ break;
+ }
+
+ if (WARN_ON_ONCE(link == ARRAY_SIZE(sdata->vif.link_conf)))
+ link = ffs(sdata->vif.valid_links) - 1;
+ }
+
+ IEEE80211_SKB_CB(skb)->control.flags |=
+ u32_encode_bits(link, IEEE80211_TX_CTRL_MLO_LINK);
+
/*
* The other path calling ieee80211_xmit is from the tasklet,
* and while we can handle concurrent transmissions locking
@@ -5710,6 +5640,31 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
local_bh_enable();
}
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, int tid)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum nl80211_band band;
+
+ rcu_read_lock();
+ if (!sdata->vif.valid_links) {
+ chanctx_conf =
+ rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return;
+ }
+ band = chanctx_conf->def.chan->band;
+ } else {
+ /* MLD transmissions must not rely on the band */
+ band = 0;
+ }
+
+ __ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
+ rcu_read_unlock();
+}
+
int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len,
const u8 *dest, __be16 proto, bool unencrypted,
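
The last two tx.c hunks make frame transmission MLO-aware: __ieee80211_tx_skb_tid_band() now derives a link ID from the frame's transmitter address and encodes it in the IEEE80211_TX_CTRL_MLO_LINK bits of the control flags, while the new ieee80211_tx_skb_tid() only looks up a chanctx (and therefore a band) when the interface has no valid_links. A reduced model of that link lookup; the struct layout and MODEL_* names below are stand-ins for the real ieee80211_vif fields, not kernel definitions:

#include <string.h>

#define MODEL_ETH_ALEN		6
#define MODEL_MAX_LINKS		15
#define MODEL_LINK_UNSPECIFIED	0xf

struct model_link_conf {
	unsigned char addr[MODEL_ETH_ALEN];	/* per-link address */
};

struct model_vif {
	unsigned char addr[MODEL_ETH_ALEN];	/* MLD address */
	struct model_link_conf *link_conf[MODEL_MAX_LINKS];
	unsigned int valid_links;		/* bitmap, 0 => non-MLO */
};

static unsigned int model_pick_tx_link(const struct model_vif *vif,
				       const unsigned char *ta)
{
	unsigned int link;

	if (!vif->valid_links)
		return 0;			/* non-MLO: always link 0 */

	if (!memcmp(vif->addr, ta, MODEL_ETH_ALEN))
		return MODEL_LINK_UNSPECIFIED;	/* addressed from the MLD */

	for (link = 0; link < MODEL_MAX_LINKS; link++)
		if (vif->link_conf[link] &&
		    !memcmp(vif->link_conf[link]->addr, ta, MODEL_ETH_ALEN))
			return link;		/* addressed from this link */

	return __builtin_ffs(vif->valid_links) - 1;	/* fallback: first valid link */
}
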
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index dad42d42aa84..645f75b0f89f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -6,7 +6,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*
* utilities for mac80211
*/
@@ -1569,7 +1569,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
return;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (chanctx_conf)
center_freq = chanctx_conf->def.chan->center_freq;
@@ -1616,7 +1616,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
memset(&qparam, 0, sizeof(qparam));
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
use_11b = (chanctx_conf &&
chanctx_conf->def.chan->band == NL80211_BAND_2GHZ) &&
!(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
@@ -1702,8 +1702,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
sdata->vif.type != NL80211_IFTYPE_NAN) {
sdata->vif.bss_conf.qos = enable_qos;
if (bss_notify)
- ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_QOS);
+ ieee80211_link_info_change_notify(sdata, 0,
+ BSS_CHANGED_QOS);
}
}
@@ -2258,7 +2258,8 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
}
static void ieee80211_assign_chanctx(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id)
{
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
@@ -2267,11 +2268,11 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
return;
mutex_lock(&local->chanctx_mtx);
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ conf = rcu_dereference_protected(sdata->vif.link_conf[link_id]->chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (conf) {
ctx = container_of(conf, struct ieee80211_chanctx, conf);
- drv_assign_vif_chanctx(local, sdata, ctx);
+ drv_assign_vif_chanctx(local, sdata, link_id, ctx);
}
mutex_unlock(&local->chanctx_mtx);
}
@@ -2477,7 +2478,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
if (sdata && ieee80211_sdata_running(sdata))
- ieee80211_assign_chanctx(local, sdata);
+ ieee80211_assign_chanctx(local, sdata, 0);
}
/* reconfigure hardware */
@@ -2487,19 +2488,23 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* Finally also reconfigure all the BSS information */
list_for_each_entry(sdata, &local->interfaces, list) {
+ unsigned int link;
u32 changed;
if (!ieee80211_sdata_running(sdata))
continue;
- ieee80211_assign_chanctx(local, sdata);
+ for (link = 0; link < ARRAY_SIZE(sdata->vif.link_conf); link++) {
+ if (sdata->vif.link_conf[link])
+ ieee80211_assign_chanctx(local, sdata, link);
+ }
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MONITOR:
break;
case NL80211_IFTYPE_ADHOC:
- if (sdata->vif.bss_conf.ibss_joined)
+ if (sdata->vif.cfg.ibss_joined)
WARN_ON(drv_join_ibss(local, sdata));
fallthrough;
default:
@@ -2526,7 +2531,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
BSS_CHANGED_TXPOWER |
BSS_CHANGED_MCAST_RATE;
- if (sdata->vif.mu_mimo_owner)
+ if (sdata->vif.bss_conf.mu_mimo_owner)
changed |= BSS_CHANGED_MU_GROUPS;
switch (sdata->vif.type) {
@@ -2536,7 +2541,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
BSS_CHANGED_PS;
/* Re-send beacon info report to the driver */
- if (sdata->u.mgd.have_beacon)
+ if (sdata->deflink.u.mgd.have_beacon)
changed |= BSS_CHANGED_BEACON_INFO;
if (sdata->vif.bss_conf.max_idle_period ||
@@ -2565,8 +2570,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (sdata->vif.type == NL80211_IFTYPE_AP) {
changed |= BSS_CHANGED_AP_PROBE_RESP;
- if (rcu_access_pointer(sdata->u.ap.beacon))
- drv_start_ap(local, sdata);
+ if (rcu_access_pointer(sdata->deflink.u.ap.beacon))
+ drv_start_ap(local, sdata, 0);
}
fallthrough;
case NL80211_IFTYPE_MESH_POINT:
@@ -2801,7 +2806,8 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect);
-void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata)
+void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -2809,8 +2815,8 @@ void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata)
mutex_lock(&local->chanctx_mtx);
- chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
- lockdep_is_held(&local->chanctx_mtx));
+ chanctx_conf = rcu_dereference_protected(sdata->vif.link_conf[link_id]->chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
/*
* This function can be called from a work, thus it may be possible
@@ -2835,8 +2841,8 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata)
mutex_lock(&local->chanctx_mtx);
- chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
- lockdep_is_held(&local->chanctx_mtx));
+ chanctx_conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
if (WARN_ON_ONCE(!chanctx_conf))
goto unlock;
@@ -3044,7 +3050,7 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
cap = le16_to_cpu(iftd->he_6ghz_capa.capa);
cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS;
- switch (sdata->smps_mode) {
+ switch (sdata->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
@@ -3778,13 +3784,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
int ieee80211_ave_rssi(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) {
- /* non-managed type inferfaces */
+ if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
return 0;
- }
- return -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
+
+ return -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal);
}
EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
@@ -3976,11 +3980,11 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
* by the time it gets it, sdata->wdev.cac_started
* will no longer be true
*/
- cancel_delayed_work(&sdata->dfs_cac_timer_work);
+ cancel_delayed_work(&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
chandef = sdata->vif.bss_conf.chandef;
- ieee80211_vif_release_channel(sdata);
+ ieee80211_link_release_channel(sdata->link[0]);
cfg80211_cac_event(sdata->dev,
&chandef,
NL80211_RADAR_CAC_ABORTED,
@@ -4074,7 +4078,7 @@ u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
/* n_P20 */
tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
/* n_P160 */
- tmp /= 80;
+ tmp /= 8;
c->center_freq1 = c->center_freq1 - 80 + 160 * tmp;
c->width = NL80211_CHAN_WIDTH_160;
ret = IEEE80211_STA_DISABLE_320MHZ;
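
The /8 in the hunk just above is what makes the 320 MHz downgrade arithmetic consistent with the narrower cases: n_P20 indexes the primary 20 MHz subchannel inside the 320 MHz block (0..15), and dividing by 8 converts it into n_P160, i.e. which 160 MHz half that subchannel sits in. A worked example with a hypothetical centre frequency F:

/*
 * 320 MHz chandef with center_freq1 = F, primary 20 MHz channel centred
 * at F + 10 MHz (so it lies in the upper half of the block):
 *
 *	n_P20  = (150 + (F + 10) - F) / 20 = 8	(index 8 of 0..15)
 *	n_P160 = 8 / 8                     = 1	(upper 160 MHz half)
 *	new center_freq1 = F - 80 + 160 * 1 = F + 80
 *
 * Dividing by 80 instead would always give n_P160 = 0 and select the
 * lower half regardless of where the primary channel actually is.
 */
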
@@ -4215,74 +4219,6 @@ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
return 0;
}
-bool ieee80211_cs_valid(const struct ieee80211_cipher_scheme *cs)
-{
- return !(cs == NULL || cs->cipher == 0 ||
- cs->hdr_len < cs->pn_len + cs->pn_off ||
- cs->hdr_len <= cs->key_idx_off ||
- cs->key_idx_shift > 7 ||
- cs->key_idx_mask == 0);
-}
-
-bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n)
-{
- int i;
-
- /* Ensure we have enough iftype bitmap space for all iftype values */
- WARN_ON((NUM_NL80211_IFTYPES / 8 + 1) > sizeof(cs[0].iftype));
-
- for (i = 0; i < n; i++)
- if (!ieee80211_cs_valid(&cs[i]))
- return false;
-
- return true;
-}
-
-const struct ieee80211_cipher_scheme *
-ieee80211_cs_get(struct ieee80211_local *local, u32 cipher,
- enum nl80211_iftype iftype)
-{
- const struct ieee80211_cipher_scheme *l = local->hw.cipher_schemes;
- int n = local->hw.n_cipher_schemes;
- int i;
- const struct ieee80211_cipher_scheme *cs = NULL;
-
- for (i = 0; i < n; i++) {
- if (l[i].cipher == cipher) {
- cs = &l[i];
- break;
- }
- }
-
- if (!cs || !(cs->iftype & BIT(iftype)))
- return NULL;
-
- return cs;
-}
-
-int ieee80211_cs_headroom(struct ieee80211_local *local,
- struct cfg80211_crypto_settings *crypto,
- enum nl80211_iftype iftype)
-{
- const struct ieee80211_cipher_scheme *cs;
- int headroom = IEEE80211_ENCRYPT_HEADROOM;
- int i;
-
- for (i = 0; i < crypto->n_ciphers_pairwise; i++) {
- cs = ieee80211_cs_get(local, crypto->ciphers_pairwise[i],
- iftype);
-
- if (cs && headroom < cs->hdr_len)
- headroom = cs->hdr_len;
- }
-
- cs = ieee80211_cs_get(local, crypto->cipher_group, iftype);
- if (cs && headroom < cs->hdr_len)
- headroom = cs->hdr_len;
-
- return headroom;
-}
-
static bool
ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i)
{
@@ -4474,7 +4410,7 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
u8 radar_detect = 0;
lockdep_assert_held(&local->chanctx_mtx);
@@ -4482,20 +4418,26 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
if (WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED))
return 0;
- list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
- if (sdata->reserved_radar_required)
- radar_detect |= BIT(sdata->reserved_chandef.width);
+ list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)
+ if (link->reserved_radar_required)
+ radar_detect |= BIT(link->reserved_chandef.width);
/*
* An in-place reservation context should not have any assigned vifs
* until it replaces the other context.
*/
WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER &&
- !list_empty(&ctx->assigned_vifs));
+ !list_empty(&ctx->assigned_links));
+
+ list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) {
+ struct ieee80211_sub_if_data *sdata = link->sdata;
- list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
- if (sdata->radar_required)
- radar_detect |= BIT(sdata->vif.bss_conf.chandef.width);
+ if (!link->radar_required)
+ continue;
+
+ radar_detect |=
+ BIT(sdata->vif.link_conf[link->link_id]->chandef.width);
+ }
return radar_detect;
}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ff26e0c4787b..fa14627b499a 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -4,7 +4,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -116,16 +116,16 @@ void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
- struct sta_info *sta)
+ struct link_sta_info *link_sta)
{
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
bool have_80mhz;
memset(vht_cap, 0, sizeof(*vht_cap));
- if (!sta->sta.deflink.ht_cap.ht_supported)
+ if (!link_sta->pub->ht_cap.ht_supported)
return;
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
@@ -162,7 +162,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
* our own capabilities and then use those below.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+ !test_sta_flag(link_sta->sta, WLAN_STA_TDLS_PEER))
ieee80211_apply_vhtcap_overrides(sdata, &own_cap);
/* take some capabilities as-is */
@@ -286,8 +286,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
*/
if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
vht_cap->vht_supported = false;
- sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n",
- sta->addr);
+ sdata_info(sdata,
+ "Ignoring VHT IE from %pM (link:%pM) due to invalid rx_mcs_map\n",
+ link_sta->sta->addr, link_sta->addr);
return;
}
@@ -295,10 +296,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
break;
default:
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+ link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
if (!(vht_cap->vht_mcs.tx_highest &
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
@@ -310,39 +311,47 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
* above) between 160 and 80+80 yet.
*/
if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ link_sta->cur_max_bandwidth =
+ IEEE80211_STA_RX_BW_160;
}
- sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+ link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
+ /*
+ * FIXME - should the amsdu len be per link? store per link
+ * and maintain a minimum?
+ */
switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
- sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ link_sta->sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
- sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ link_sta->sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
default:
- sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ link_sta->sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
break;
}
}
/* FIXME: move this to some better location - parses HE/EHT now */
-enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
+enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
{
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
- struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap;
- struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.deflink.eht_cap;
+ unsigned int link_id = link_sta->link_id;
+ struct ieee80211_sub_if_data *sdata = link_sta->sta->sdata;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = &link_sta->pub->eht_cap;
u32 cap_width;
if (he_cap->has_he) {
u8 info;
if (eht_cap->has_eht &&
- sta->sdata->vif.bss_conf.chandef.chan->band ==
- NL80211_BAND_6GHZ) {
+ link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
info = eht_cap->eht_cap_elem.phy_cap_info[0];
if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
@@ -351,8 +360,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
info = he_cap->he_cap_elem.phy_cap_info[0];
- if (sta->sdata->vif.bss_conf.chandef.chan->band ==
- NL80211_BAND_2GHZ) {
+ if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ) {
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
return IEEE80211_STA_RX_BW_40;
else
@@ -369,7 +377,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
}
if (!vht_cap->vht_supported)
- return sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 :
IEEE80211_STA_RX_BW_20;
@@ -390,16 +398,17 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
return IEEE80211_STA_RX_BW_80;
}
-enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
+enum nl80211_chan_width
+ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta)
{
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
u32 cap_width;
if (!vht_cap->vht_supported) {
- if (!sta->sta.deflink.ht_cap.ht_supported)
+ if (!link_sta->pub->ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20_NOHT;
- return sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
}
@@ -414,15 +423,17 @@ enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
}
enum nl80211_chan_width
-ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta)
+ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *link_sta)
{
- enum ieee80211_sta_rx_bandwidth cur_bw = sta->sta.deflink.bandwidth;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
+ enum ieee80211_sta_rx_bandwidth cur_bw =
+ link_sta->pub->bandwidth;
+ struct ieee80211_sta_vht_cap *vht_cap =
+ &link_sta->pub->vht_cap;
u32 cap_width;
switch (cur_bw) {
case IEEE80211_STA_RX_BW_20:
- if (!sta->sta.deflink.ht_cap.ht_supported)
+ if (!link_sta->pub->ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20_NOHT;
else
return NL80211_CHAN_WIDTH_20;
@@ -466,14 +477,17 @@ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
}
/* FIXME: rename/move - this deals with everything not just VHT */
-enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
+enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct sta_info *sta = link_sta->sta;
+ struct ieee80211_bss_conf *link_conf =
+ sta->sdata->vif.link_conf[link_sta->link_id];
+ enum nl80211_chan_width bss_width = link_conf->chandef.width;
enum ieee80211_sta_rx_bandwidth bw;
- enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
- bw = ieee80211_sta_cap_rx_bw(sta);
- bw = min(bw, sta->deflink.cur_max_bandwidth);
+ bw = ieee80211_sta_cap_rx_bw(link_sta);
+ bw = min(bw, link_sta->cur_max_bandwidth);
/* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of
* IEEE80211-2016 specification makes higher bandwidth operation
@@ -495,18 +509,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
return bw;
}
-void ieee80211_sta_set_rx_nss(struct sta_info *sta)
+void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
{
u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
bool support_160;
/* if we received a notification already don't overwrite it */
- if (sta->sta.deflink.rx_nss)
+ if (link_sta->pub->rx_nss)
return;
- if (sta->sta.deflink.eht_cap.has_eht) {
+ if (link_sta->pub->eht_cap.has_eht) {
int i;
- const u8 *rx_nss_mcs = (void *)&sta->sta.deflink.eht_cap.eht_mcs_nss_supp;
+ const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
/* get the max nss for EHT over all possible bandwidths and mcs */
for (i = 0; i < sizeof(struct ieee80211_eht_mcs_nss_supp); i++)
@@ -515,10 +529,10 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
IEEE80211_EHT_MCS_NSS_RX));
}
- if (sta->sta.deflink.he_cap.has_he) {
+ if (link_sta->pub->he_cap.has_he) {
int i;
u8 rx_mcs_80 = 0, rx_mcs_160 = 0;
- const struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
u16 mcs_160_map =
le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
@@ -549,23 +563,23 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
he_rx_nss = rx_mcs_80;
}
- if (sta->sta.deflink.ht_cap.ht_supported) {
- if (sta->sta.deflink.ht_cap.mcs.rx_mask[0])
+ if (link_sta->pub->ht_cap.ht_supported) {
+ if (link_sta->pub->ht_cap.mcs.rx_mask[0])
ht_rx_nss++;
- if (sta->sta.deflink.ht_cap.mcs.rx_mask[1])
+ if (link_sta->pub->ht_cap.mcs.rx_mask[1])
ht_rx_nss++;
- if (sta->sta.deflink.ht_cap.mcs.rx_mask[2])
+ if (link_sta->pub->ht_cap.mcs.rx_mask[2])
ht_rx_nss++;
- if (sta->sta.deflink.ht_cap.mcs.rx_mask[3])
+ if (link_sta->pub->ht_cap.mcs.rx_mask[3])
ht_rx_nss++;
/* FIXME: consider rx_highest? */
}
- if (sta->sta.deflink.vht_cap.vht_supported) {
+ if (link_sta->pub->vht_cap.vht_supported) {
int i;
u16 rx_mcs_map;
- rx_mcs_map = le16_to_cpu(sta->sta.deflink.vht_cap.vht_mcs.rx_mcs_map);
+ rx_mcs_map = le16_to_cpu(link_sta->pub->vht_cap.vht_mcs.rx_mcs_map);
for (i = 7; i >= 0; i--) {
u8 mcs = (rx_mcs_map >> (2 * i)) & 3;
@@ -581,12 +595,12 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
rx_nss = max(vht_rx_nss, ht_rx_nss);
rx_nss = max(he_rx_nss, rx_nss);
rx_nss = max(eht_rx_nss, rx_nss);
- sta->sta.deflink.rx_nss = max_t(u8, 1, rx_nss);
+ link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
}
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, u8 opmode,
- enum nl80211_band band)
+ struct link_sta_info *link_sta,
+ u8 opmode, enum nl80211_band band)
{
enum ieee80211_sta_rx_bandwidth new_bw;
struct sta_opmode_info sta_opmode = {};
@@ -601,8 +615,8 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- if (sta->sta.deflink.rx_nss != nss) {
- sta->sta.deflink.rx_nss = nss;
+ if (link_sta->pub->rx_nss != nss) {
+ link_sta->pub->rx_nss = nss;
sta_opmode.rx_nss = nss;
changed |= IEEE80211_RC_NSS_CHANGED;
sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
@@ -611,88 +625,91 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
/* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
+ link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ:
/* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
+ link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ:
if (opmode & IEEE80211_OPMODE_NOTIF_BW_160_80P80)
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
else
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+ link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ:
/* legacy only, no longer used by newer spec */
- sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
break;
}
- new_bw = ieee80211_sta_cur_vht_bw(sta);
- if (new_bw != sta->sta.deflink.bandwidth) {
- sta->sta.deflink.bandwidth = new_bw;
- sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(sta);
+ new_bw = ieee80211_sta_cur_vht_bw(link_sta);
+ if (new_bw != link_sta->pub->bandwidth) {
+ link_sta->pub->bandwidth = new_bw;
+ sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(link_sta);
changed |= IEEE80211_RC_BW_CHANGED;
sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED;
}
if (sta_opmode.changed)
- cfg80211_sta_opmode_change_notify(sdata->dev, sta->addr,
+ cfg80211_sta_opmode_change_notify(sdata->dev, link_sta->addr,
&sta_opmode, GFP_KERNEL);
return changed;
}
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id,
struct ieee80211_mgmt *mgmt)
{
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_bss_conf *link_conf = sdata->vif.link_conf[link_id];
- if (!sdata->vif.mu_mimo_owner)
+ if (!link_conf->mu_mimo_owner)
return;
if (!memcmp(mgmt->u.action.u.vht_group_notif.position,
- bss_conf->mu_group.position, WLAN_USER_POSITION_LEN) &&
+ link_conf->mu_group.position, WLAN_USER_POSITION_LEN) &&
!memcmp(mgmt->u.action.u.vht_group_notif.membership,
- bss_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN))
+ link_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN))
return;
- memcpy(bss_conf->mu_group.membership,
+ memcpy(link_conf->mu_group.membership,
mgmt->u.action.u.vht_group_notif.membership,
WLAN_MEMBERSHIP_LEN);
- memcpy(bss_conf->mu_group.position,
+ memcpy(link_conf->mu_group.position,
mgmt->u.action.u.vht_group_notif.position,
WLAN_USER_POSITION_LEN);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS);
+ ieee80211_link_info_change_notify(sdata, link_id, BSS_CHANGED_MU_GROUPS);
}
-void ieee80211_update_mu_groups(struct ieee80211_vif *vif,
+void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id,
const u8 *membership, const u8 *position)
{
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ieee80211_bss_conf *link_conf = vif->link_conf[link_id];
- if (WARN_ON_ONCE(!vif->mu_mimo_owner))
+ if (WARN_ON_ONCE(!link_conf->mu_mimo_owner))
return;
- memcpy(bss_conf->mu_group.membership, membership, WLAN_MEMBERSHIP_LEN);
- memcpy(bss_conf->mu_group.position, position, WLAN_USER_POSITION_LEN);
+ memcpy(link_conf->mu_group.membership, membership, WLAN_MEMBERSHIP_LEN);
+ memcpy(link_conf->mu_group.position, position, WLAN_USER_POSITION_LEN);
}
EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups);
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, u8 opmode,
- enum nl80211_band band)
+ struct link_sta_info *link_sta,
+ u8 opmode, enum nl80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
- u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
+ u32 changed = __ieee80211_vht_handle_opmode(sdata, link_sta,
+ opmode, band);
if (changed > 0) {
ieee80211_recalc_min_chandef(sdata);
- rate_control_rate_update(local, sband, sta, changed);
+ rate_control_rate_update(local, sband, link_sta->sta,
+ link_sta->link_id, changed);
}
}
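
The vht.c conversion above routes every capability lookup through link_sta->pub and the per-link ieee80211_bss_conf instead of the deflink copies, so each MLO link carries its own VHT/HE/EHT state. A simplified userspace model of that per-link indirection, with hypothetical struct names rather than the real mac80211 layout:

/* Sketch: per-link capability lookup, loosely modelled on the
 * link_sta->pub / vif.link_conf[link_id] indirection used above.
 * Types and field names here are illustrative, not mac80211's. */
#include <stdio.h>

#define MAX_LINKS 4

struct link_caps {
	int vht_supported;
	int max_bandwidth;	/* e.g. 80 or 160 */
};

struct sta {
	struct link_caps *link[MAX_LINKS]; /* one entry per MLO link, may be NULL */
};

static int sta_link_max_bw(const struct sta *sta, unsigned int link_id)
{
	const struct link_caps *caps;

	if (link_id >= MAX_LINKS || !sta->link[link_id])
		return -1;		/* link not set up */

	caps = sta->link[link_id];
	return caps->vht_supported ? caps->max_bandwidth : 20;
}

int main(void)
{
	struct link_caps link0 = { .vht_supported = 1, .max_bandwidth = 160 };
	struct sta sta = { .link = { [0] = &link0 } };

	printf("link 0 max bw: %d MHz\n", sta_link_max_bw(&sta, 0));
	printf("link 1 max bw: %d\n", sta_link_max_bw(&sta, 1)); /* -1: no such link */
	return 0;
}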
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index d50480b31750..ecc1de2e68a5 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -2,6 +2,7 @@
/*
* Copyright 2004, Instant802 Networks, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2022 Intel Corporation
*/
#include <linux/netdevice.h>
@@ -210,7 +211,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
if (sta)
break;
- ra = sdata->u.mgd.bssid;
+ ra = sdata->deflink.u.mgd.bssid;
break;
case NL80211_IFTYPE_ADHOC:
ra = skb->data;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 5fd8a3e8b5b4..93ec2f349748 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -3,7 +3,7 @@
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <j@w1.fi>
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#include <linux/netdevice.h>
@@ -778,102 +778,6 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
}
-static ieee80211_tx_result
-ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_key *key = tx->key;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int hdrlen;
- u8 *pos, iv_len = key->conf.iv_len;
-
- if (info->control.hw_key &&
- !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
- /* hwaccel has no need for preallocated head room */
- return TX_CONTINUE;
- }
-
- if (unlikely(skb_headroom(skb) < iv_len &&
- pskb_expand_head(skb, iv_len, 0, GFP_ATOMIC)))
- return TX_DROP;
-
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
-
- pos = skb_push(skb, iv_len);
- memmove(pos, pos + iv_len, hdrlen);
-
- return TX_CONTINUE;
-}
-
-static inline int ieee80211_crypto_cs_pn_compare(u8 *pn1, u8 *pn2, int len)
-{
- int i;
-
- /* pn is little endian */
- for (i = len - 1; i >= 0; i--) {
- if (pn1[i] < pn2[i])
- return -1;
- else if (pn1[i] > pn2[i])
- return 1;
- }
-
- return 0;
-}
-
-static ieee80211_rx_result
-ieee80211_crypto_cs_decrypt(struct ieee80211_rx_data *rx)
-{
- struct ieee80211_key *key = rx->key;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- const struct ieee80211_cipher_scheme *cs = NULL;
- int hdrlen = ieee80211_hdrlen(hdr->frame_control);
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
- int data_len;
- u8 *rx_pn;
- u8 *skb_pn;
- u8 qos_tid;
-
- if (!rx->sta || !rx->sta->cipher_scheme ||
- !(status->flag & RX_FLAG_DECRYPTED))
- return RX_DROP_UNUSABLE;
-
- if (!ieee80211_is_data(hdr->frame_control))
- return RX_CONTINUE;
-
- cs = rx->sta->cipher_scheme;
-
- data_len = rx->skb->len - hdrlen - cs->hdr_len;
-
- if (data_len < 0)
- return RX_DROP_UNUSABLE;
-
- if (ieee80211_is_data_qos(hdr->frame_control))
- qos_tid = ieee80211_get_tid(hdr);
- else
- qos_tid = 0;
-
- if (skb_linearize(rx->skb))
- return RX_DROP_UNUSABLE;
-
- rx_pn = key->u.gen.rx_pn[qos_tid];
- skb_pn = rx->skb->data + hdrlen + cs->pn_off;
-
- if (ieee80211_crypto_cs_pn_compare(skb_pn, rx_pn, cs->pn_len) <= 0)
- return RX_DROP_UNUSABLE;
-
- memcpy(rx_pn, skb_pn, cs->pn_len);
-
- /* remove security header and MIC */
- if (pskb_trim(rx->skb, rx->skb->len - cs->mic_len))
- return RX_DROP_UNUSABLE;
-
- memmove(rx->skb->data + cs->hdr_len, rx->skb->data, hdrlen);
- skb_pull(rx->skb, cs->hdr_len);
-
- return RX_CONTINUE;
-}
-
static void bip_aad(struct sk_buff *skb, u8 *aad)
{
__le16 mask_fc;
@@ -1212,38 +1116,3 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
}
-
-ieee80211_tx_result
-ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
-{
- struct sk_buff *skb;
- struct ieee80211_tx_info *info = NULL;
- ieee80211_tx_result res;
-
- skb_queue_walk(&tx->skbs, skb) {
- info = IEEE80211_SKB_CB(skb);
-
- /* handle hw-only algorithm */
- if (!info->control.hw_key)
- return TX_DROP;
-
- if (tx->key->flags & KEY_FLAG_CIPHER_SCHEME) {
- res = ieee80211_crypto_cs_encrypt(tx, skb);
- if (res != TX_CONTINUE)
- return res;
- }
- }
-
- ieee80211_tx_set_protected(tx);
-
- return TX_CONTINUE;
-}
-
-ieee80211_rx_result
-ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx)
-{
- if (rx->sta && rx->sta->cipher_scheme)
- return ieee80211_crypto_cs_decrypt(rx);
-
- return RX_DROP_UNUSABLE;
-}
diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h
index af3272284e85..a9a81abb5479 100644
--- a/net/mac80211/wpa.h
+++ b/net/mac80211/wpa.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2002-2004, Instant802 Networks, Inc.
+ * Copyright (C) 2022 Intel Corporation
*/
#ifndef WPA_H
@@ -39,10 +40,6 @@ ieee80211_tx_result
ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx);
ieee80211_rx_result
ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx);
-ieee80211_tx_result
-ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx);
-ieee80211_rx_result
-ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx);
ieee80211_tx_result
ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 7c7395b58944..291b5da42fdb 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -413,7 +413,7 @@ static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned
int i;
for (i = 0; i < nr; i++) {
- if (mptcp_addresses_equal(&addrs[i], addr, addr->port))
+ if (addrs[i].id == addr->id)
return true;
}
@@ -449,7 +449,8 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
mptcp_for_each_subflow(msk, subflow) {
ssk = mptcp_subflow_tcp_sock(subflow);
remote_address((struct sock_common *)ssk, &addrs[i]);
- if (deny_id0 && mptcp_addresses_equal(&addrs[i], &remote, false))
+ addrs[i].id = subflow->remote_id;
+ if (deny_id0 && !addrs[i].id)
continue;
if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
@@ -463,6 +464,37 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
return i;
}
+static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+ bool prio, bool backup)
+{
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
+
+ pr_debug("send ack for %s",
+ prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
+
+ slow = lock_sock_fast(ssk);
+ if (prio) {
+ if (subflow->backup != backup)
+ msk->last_snd = NULL;
+
+ subflow->send_mp_prio = 1;
+ subflow->backup = backup;
+ subflow->request_bkup = backup;
+ }
+
+ __mptcp_subflow_send_ack(ssk);
+ unlock_sock_fast(ssk, slow);
+}
+
+static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+ bool prio, bool backup)
+{
+ spin_unlock_bh(&msk->pm.lock);
+ __mptcp_pm_send_ack(msk, subflow, prio, backup);
+ spin_lock_bh(&msk->pm.lock);
+}
+
static struct mptcp_pm_addr_entry *
__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
{
@@ -482,30 +514,14 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if ((!lookup_by_id && mptcp_addresses_equal(&entry->addr, info, true)) ||
+ if ((!lookup_by_id &&
+ mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
(lookup_by_id && entry->addr.id == info->id))
return entry;
}
return NULL;
}
-static int
-lookup_id_by_addr(const struct pm_nl_pernet *pernet, const struct mptcp_addr_info *addr)
-{
- const struct mptcp_pm_addr_entry *entry;
- int ret = -1;
-
- rcu_read_lock();
- list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if (mptcp_addresses_equal(&entry->addr, addr, entry->addr.port)) {
- ret = entry->addr.id;
- break;
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
@@ -523,13 +539,23 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
/* do lazy endpoint usage accounting for the MPC subflows */
if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
+ struct mptcp_pm_addr_entry *entry;
struct mptcp_addr_info mpc_addr;
- int mpc_id;
+ bool backup = false;
local_address((struct sock_common *)msk->first, &mpc_addr);
- mpc_id = lookup_id_by_addr(pernet, &mpc_addr);
- if (mpc_id >= 0)
- __clear_bit(mpc_id, msk->pm.id_avail_bitmap);
+ rcu_read_lock();
+ entry = __lookup_addr(pernet, &mpc_addr, false);
+ if (entry) {
+ __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
+ msk->mpc_endpoint_id = entry->addr.id;
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ }
+ rcu_read_unlock();
+
+ if (backup)
+ mptcp_pm_send_ack(msk, subflow, true, backup);
msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
}
@@ -705,16 +731,8 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
return;
subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
- if (subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
- spin_unlock_bh(&msk->pm.lock);
- pr_debug("send ack for %s",
- mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr");
-
- mptcp_subflow_send_ack(ssk);
- spin_lock_bh(&msk->pm.lock);
- }
+ if (subflow)
+ mptcp_pm_send_ack(msk, subflow, false, false);
}
int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
@@ -729,7 +747,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
struct mptcp_addr_info local, remote;
- bool slow;
local_address((struct sock_common *)ssk, &local);
if (!mptcp_addresses_equal(&local, addr, addr->port))
@@ -741,23 +758,18 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
continue;
}
- slow = lock_sock_fast(ssk);
- if (subflow->backup != bkup)
- msk->last_snd = NULL;
- subflow->backup = bkup;
- subflow->send_mp_prio = 1;
- subflow->request_bkup = bkup;
-
- pr_debug("send ack for mp_prio");
- __mptcp_subflow_send_ack(ssk);
- unlock_sock_fast(ssk, slow);
-
+ __mptcp_pm_send_ack(msk, subflow, true, bkup);
return 0;
}
return -EINVAL;
}
+static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
+{
+ return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
+}
+
static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list,
enum linux_mptcp_mib_field rm_type)
@@ -781,6 +793,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
return;
for (i = 0; i < rm_list->nr; i++) {
+ u8 rm_id = rm_list->ids[i];
bool removed = false;
list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
@@ -788,15 +801,15 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
u8 id = subflow->local_id;
- if (rm_type == MPTCP_MIB_RMADDR)
- id = subflow->remote_id;
-
- if (rm_list->ids[i] != id)
+ if (rm_type == MPTCP_MIB_RMADDR && subflow->remote_id != rm_id)
+ continue;
+ if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
continue;
- pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u",
+ pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
- i, rm_list->ids[i], subflow->local_id, subflow->remote_id);
+ i, rm_id, subflow->local_id, subflow->remote_id,
+ msk->mpc_endpoint_id);
spin_unlock_bh(&msk->pm.lock);
mptcp_subflow_shutdown(sk, ssk, how);
@@ -808,7 +821,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
__MPTCP_INC_STATS(sock_net(sk), rm_type);
}
if (rm_type == MPTCP_MIB_RMSUBFLOW)
- __set_bit(rm_list->ids[i], msk->pm.id_avail_bitmap);
+ __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
if (!removed)
continue;
@@ -907,10 +920,11 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
/* do not insert duplicate address, differentiate on port only
* singled addresses
*/
+ if (!address_use_port(entry))
+ entry->addr.port = 0;
list_for_each_entry(cur, &pernet->local_addr_list, list) {
if (mptcp_addresses_equal(&cur->addr, &entry->addr,
- address_use_port(entry) &&
- address_use_port(cur))) {
+ cur->addr.port || entry->addr.port)) {
/* allow replacing the exiting endpoint only if such
* endpoint is an implicit one and the user-space
* did not provide an endpoint id
@@ -956,7 +970,10 @@ find_next:
}
pernet->addrs++;
- list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
+ if (!entry->addr.port)
+ list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
+ else
+ list_add_rcu(&entry->list, &pernet->local_addr_list);
ret = entry->addr.id;
out:
@@ -1134,7 +1151,7 @@ void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ss
}
unlock_sock_fast(ssk, slow);
- /* always try to push the pending data regarless of re-injections:
+ /* always try to push the pending data regardless of re-injections:
* we can possibly use backup subflows now, and subflow selection
* is cheap under the msk socket lock
*/
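
The new mptcp_pm_send_ack() wrapper releases msk->pm.lock around the unlocked __mptcp_pm_send_ack() body, so the subflow socket lock is never taken while the PM spinlock is held. A toy pthread model of that locked/unlocked helper pairing; the names below are invented for illustration:

/* Sketch: "drop lock A before taking lock B" helper pairing,
 * modelled on mptcp_pm_send_ack()/__mptcp_pm_send_ack() above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;   /* stands in for msk->pm.lock */
static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for the subflow socket lock */

/* Unlocked variant: caller must NOT hold pm_lock. */
static void pm_send_ack_unlocked(void)
{
	pthread_mutex_lock(&sock_lock);
	printf("ack sent\n");
	pthread_mutex_unlock(&sock_lock);
}

/* Locked variant: caller holds pm_lock; release it across the blocking work. */
static void pm_send_ack(void)
{
	pthread_mutex_unlock(&pm_lock);
	pm_send_ack_unlocked();
	pthread_mutex_lock(&pm_lock);
}

int main(void)
{
	pthread_mutex_lock(&pm_lock);
	pm_send_ack();			/* safe: never holds both locks at once */
	pthread_mutex_unlock(&pm_lock);
	return 0;
}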
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 21a3ed64226e..57f23f4e3a7c 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -167,8 +167,8 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
{
- amount >>= SK_MEM_QUANTUM_SHIFT;
- mptcp_sk(sk)->rmem_fwd_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
+ amount >>= PAGE_SHIFT;
+ mptcp_sk(sk)->rmem_fwd_alloc -= amount << PAGE_SHIFT;
__sk_mem_reduce_allocated(sk, amount);
}
@@ -181,8 +181,8 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
/* see sk_mem_uncharge() for the rationale behind the following schema */
- if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
- __mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK);
+ if (unlikely(reclaimable >= PAGE_SIZE))
+ __mptcp_rmem_reclaim(sk, reclaimable);
}
static void mptcp_rfree(struct sk_buff *skb)
@@ -323,20 +323,16 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
struct mptcp_sock *msk = mptcp_sk(sk);
int amt, amount;
- if (size < msk->rmem_fwd_alloc)
+ if (size <= msk->rmem_fwd_alloc)
return true;
+ size -= msk->rmem_fwd_alloc;
amt = sk_mem_pages(size);
- amount = amt << SK_MEM_QUANTUM_SHIFT;
- msk->rmem_fwd_alloc += amount;
- if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) {
- if (ssk->sk_forward_alloc < amount) {
- msk->rmem_fwd_alloc -= amount;
- return false;
- }
+ amount = amt << PAGE_SHIFT;
+ if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
+ return false;
- ssk->sk_forward_alloc -= amount;
- }
+ msk->rmem_fwd_alloc += amount;
return true;
}
@@ -512,7 +508,7 @@ void __mptcp_subflow_send_ack(struct sock *ssk)
tcp_send_ack(ssk);
}
-void mptcp_subflow_send_ack(struct sock *ssk)
+static void mptcp_subflow_send_ack(struct sock *ssk)
{
bool slow;
@@ -971,25 +967,6 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
df->data_seq + df->data_len == msk->write_seq;
}
-static void __mptcp_mem_reclaim_partial(struct sock *sk)
-{
- int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
-
- lockdep_assert_held_once(&sk->sk_lock.slock);
-
- if (reclaimable > SK_MEM_QUANTUM)
- __mptcp_rmem_reclaim(sk, reclaimable - 1);
-
- sk_mem_reclaim_partial(sk);
-}
-
-static void mptcp_mem_reclaim_partial(struct sock *sk)
-{
- mptcp_data_lock(sk);
- __mptcp_mem_reclaim_partial(sk);
- mptcp_data_unlock(sk);
-}
-
static void dfrag_uncharge(struct sock *sk, int len)
{
sk_mem_uncharge(sk, len);
@@ -1009,7 +986,6 @@ static void __mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_data_frag *dtmp, *dfrag;
- bool cleaned = false;
u64 snd_una;
/* on fallback we just need to ignore snd_una, as this is really
@@ -1032,7 +1008,6 @@ static void __mptcp_clean_una(struct sock *sk)
}
dfrag_clear(sk, dfrag);
- cleaned = true;
}
dfrag = mptcp_rtx_head(sk);
@@ -1054,7 +1029,6 @@ static void __mptcp_clean_una(struct sock *sk)
dfrag->already_sent -= delta;
dfrag_uncharge(sk, delta);
- cleaned = true;
}
/* all retransmitted data acked, recovery completed */
@@ -1062,9 +1036,6 @@ static void __mptcp_clean_una(struct sock *sk)
msk->recovery = false;
out:
- if (cleaned && tcp_under_memory_pressure(sk))
- __mptcp_mem_reclaim_partial(sk);
-
if (snd_una == READ_ONCE(msk->snd_nxt) &&
snd_una == READ_ONCE(msk->write_seq)) {
if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
@@ -1216,12 +1187,6 @@ static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, boo
{
gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
- if (unlikely(tcp_under_memory_pressure(sk))) {
- if (data_lock_held)
- __mptcp_mem_reclaim_partial(sk);
- else
- mptcp_mem_reclaim_partial(sk);
- }
return __mptcp_alloc_tx_skb(sk, ssk, gfp);
}
@@ -3464,7 +3429,10 @@ static struct proto mptcp_prot = {
.get_port = mptcp_get_port,
.forward_alloc_get = mptcp_forward_alloc_get,
.sockets_allocated = &mptcp_sockets_allocated,
+
.memory_allocated = &tcp_memory_allocated,
+ .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
+
.memory_pressure = &tcp_memory_pressure,
.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
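
The protocol.c hunks above switch the msk forward-allocation accounting from SK_MEM_QUANTUM units to whole pages: the shortfall is rounded up with sk_mem_pages() and charged as pages << PAGE_SHIFT. A small arithmetic sketch of that rounding, assuming 4 KiB pages purely for the demo:

/* Sketch: page-granular forward-alloc arithmetic, mirroring the
 * "amt = sk_mem_pages(size); amount = amt << PAGE_SHIFT" pattern above. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1 << DEMO_PAGE_SHIFT)	/* assume 4 KiB pages for the demo */

/* Equivalent of sk_mem_pages(): round a byte count up to whole pages. */
static int mem_pages(int bytes)
{
	return (bytes + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	int fwd_alloc = 1000;	/* bytes already reserved (rmem_fwd_alloc) */
	int need = 6000;	/* bytes the incoming skb needs */

	if (need > fwd_alloc) {
		int missing = need - fwd_alloc;
		int pages = mem_pages(missing);
		int amount = pages << DEMO_PAGE_SHIFT;

		/* charge whole pages to the protocol memory accounting,
		 * then credit them to the forward allocation */
		fwd_alloc += amount;
		printf("charged %d page(s) = %d bytes, fwd_alloc now %d\n",
		       pages, amount, fwd_alloc);
	}
	return 0;
}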
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 480c5320b86e..5d6043c16b09 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -83,7 +83,6 @@
/* MPTCP MP_JOIN flags */
#define MPTCPOPT_BACKUP BIT(0)
-#define MPTCPOPT_HMAC_LEN 20
#define MPTCPOPT_THMAC_LEN 8
/* MPTCP MP_CAPABLE flags */
@@ -283,6 +282,7 @@ struct mptcp_sock {
bool use_64bit_ack; /* Set when we received a 64-bit DSN */
bool csum_enabled;
bool allow_infinite_fallback;
+ u8 mpc_endpoint_id;
u8 recvmsg_inq:1,
cork:1,
nodelay:1;
@@ -608,7 +608,6 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow);
void __mptcp_subflow_send_ack(struct sock *ssk);
-void mptcp_subflow_send_ack(struct sock *ssk);
void mptcp_subflow_reset(struct sock *ssk);
void mptcp_subflow_queue_clean(struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 63e8892ec807..d4b16d033978 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1634,7 +1634,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
/* the newly created socket really belongs to the owning MPTCP master
* socket, even if for additional subflows the allocation is performed
* by a kernel workqueue. Adjust inode references, so that the
- * procfs/diag interaces really show this one belonging to the correct
+ * procfs/diag interfaces really show this one belonging to the correct
* user.
*/
SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index ddc54b6d18ee..df6abbfe0079 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -734,6 +734,15 @@ config NF_FLOW_TABLE
To compile it as a module, choose M here.
+config NF_FLOW_TABLE_PROCFS
+ bool "Supply flow table statistics in procfs"
+ default y
+ depends on PROC_FS
+ depends on SYSCTL
+ help
+ This option enables the flow table offload statistics
+ to be shown in procfs under net/netfilter/nf_flowtable.
+
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 238b6a620e88..06df49ea6329 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -128,6 +128,7 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \
nf_flow_table_offload.o
+nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
diff --git a/net/netfilter/ipvs/ip_vs_mh.c b/net/netfilter/ipvs/ip_vs_mh.c
index da0280cec506..e3d7f5c879ce 100644
--- a/net/netfilter/ipvs/ip_vs_mh.c
+++ b/net/netfilter/ipvs/ip_vs_mh.c
@@ -174,8 +174,7 @@ static int ip_vs_mh_populate(struct ip_vs_mh_state *s,
return 0;
}
- table = kcalloc(BITS_TO_LONGS(IP_VS_MH_TAB_SIZE),
- sizeof(unsigned long), GFP_KERNEL);
+ table = bitmap_zalloc(IP_VS_MH_TAB_SIZE, GFP_KERNEL);
if (!table)
return -ENOMEM;
@@ -227,7 +226,7 @@ static int ip_vs_mh_populate(struct ip_vs_mh_state *s,
}
out:
- kfree(table);
+ bitmap_free(table);
return 0;
}
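
bitmap_zalloc()/bitmap_free() above replace the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(unsigned long)) pair and state the intent directly. A userspace sketch of the same idea; the demo_* helpers below merely mimic the kernel API:

/* Sketch: a zeroed bitmap allocator, modelled on bitmap_zalloc()/bitmap_free(). */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *demo_bitmap_zalloc(unsigned int nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

static void demo_bitmap_free(unsigned long *map)
{
	free(map);
}

static void demo_set_bit(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

int main(void)
{
	unsigned long *table = demo_bitmap_zalloc(257); /* e.g. an IP_VS_MH_TAB_SIZE-like count */

	if (!table)
		return 1;

	demo_set_bit(table, 200);
	printf("word[%zu] = 0x%lx\n",
	       (size_t)(200 / BITS_PER_LONG), table[200 / BITS_PER_LONG]);
	demo_bitmap_free(table);
	return 0;
}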
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
index 1ba6becc3079..9fb9b8031298 100644
--- a/net/netfilter/nf_conntrack_broadcast.c
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -20,6 +20,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int timeout)
{
+ const struct nf_conntrack_helper *helper;
struct nf_conntrack_expect *exp;
struct iphdr *iph = ip_hdr(skb);
struct rtable *rt = skb_rtable(skb);
@@ -58,7 +59,10 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
goto out;
exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
+
+ helper = rcu_dereference(help->helper);
+ if (helper)
+ exp->tuple.src.u.udp.port = helper->tuple.src.u.udp.port;
exp->mask.src.u3.ip = mask;
exp->mask.src.u.udp.port = htons(0xFFFF);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 369aeabb94fe..8c97d062b1ae 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -329,20 +329,18 @@ nf_ct_get_tuple(const struct sk_buff *skb,
return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
case IPPROTO_TCP:
- case IPPROTO_UDP: /* fallthrough */
- return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+ case IPPROTO_UDP:
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
case IPPROTO_UDPLITE:
- return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP:
- return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
case IPPROTO_DCCP:
- return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
+ /* fallthrough */
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
default:
break;
}
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 2eb31ffb3d14..bb76305bb7ff 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -49,64 +49,8 @@ MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
"if both endpoints are on different sides "
"(determined by routing information)");
-/* Hooks for NAT */
-int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- H245_TransportAddress *taddr,
- union nf_inet_addr *addr, __be16 port)
- __read_mostly;
-int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- union nf_inet_addr *addr, __be16 port)
- __read_mostly;
-int (*set_sig_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count) __read_mostly;
-int (*set_ras_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count) __read_mostly;
-int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- H245_TransportAddress *taddr,
- __be16 port, __be16 rtp_port,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp) __read_mostly;
-int (*nat_t120_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- H245_TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp) __read_mostly;
-int (*nat_h245_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp) __read_mostly;
-int (*nat_callforwarding_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp) __read_mostly;
-int (*nat_q931_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, TransportAddress *taddr, int idx,
- __be16 port, struct nf_conntrack_expect *exp)
- __read_mostly;
+const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook __read_mostly;
+EXPORT_SYMBOL_GPL(nfct_h323_nat_hook);
static DEFINE_SPINLOCK(nf_h323_lock);
static char *h323_buffer;
@@ -259,6 +203,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr)
{
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
@@ -266,7 +211,6 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
union nf_inet_addr addr;
struct nf_conntrack_expect *rtp_exp;
struct nf_conntrack_expect *rtcp_exp;
- typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp;
/* Read RTP or RTCP address */
if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
@@ -296,15 +240,16 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_UDP, NULL, &rtcp_port);
+ nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
- (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
+ nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* NAT needed */
- ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
- taddr, port, rtp_port, rtp_exp, rtcp_exp);
+ ret = nathook->nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
+ taddr, port, rtp_port, rtp_exp, rtcp_exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(rtp_exp, 0) == 0) {
if (nf_ct_expect_related(rtcp_exp, 0) == 0) {
@@ -333,12 +278,12 @@ static int expect_t120(struct sk_buff *skb,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr)
{
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
- typeof(nat_t120_hook) nat_t120;
/* Read T.120 address */
if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
@@ -355,15 +300,16 @@ static int expect_t120(struct sk_buff *skb,
IPPROTO_TCP, NULL, &port);
exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */
+ nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
- (nat_t120 = rcu_dereference(nat_t120_hook)) &&
+ nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* NAT needed */
- ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
- port, exp);
+ ret = nathook->nat_t120(skb, ct, ctinfo, protoff, data,
+ dataoff, taddr, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_h323: expect T.120 ");
@@ -664,18 +610,19 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
return 1;
}
+EXPORT_SYMBOL_GPL(get_h225_addr);
static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
TransportAddress *taddr)
{
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
- typeof(nat_h245_hook) nat_h245;
/* Read h245Address */
if (!get_h225_addr(ct, *data, taddr, &addr, &port) ||
@@ -692,15 +639,16 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
IPPROTO_TCP, NULL, &port);
exp->helper = &nf_conntrack_helper_h245;
+ nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
- (nat_h245 = rcu_dereference(nat_h245_hook)) &&
+ nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* NAT needed */
- ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
- port, exp);
+ ret = nathook->nat_h245(skb, ct, ctinfo, protoff, data,
+ dataoff, taddr, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_q931: expect H.245 ");
@@ -785,13 +733,13 @@ static int expect_callforwarding(struct sk_buff *skb,
unsigned char **data, int dataoff,
TransportAddress *taddr)
{
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
struct net *net = nf_ct_net(ct);
- typeof(nat_callforwarding_hook) nat_callforwarding;
/* Read alternativeAddress */
if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0)
@@ -815,16 +763,17 @@ static int expect_callforwarding(struct sk_buff *skb,
IPPROTO_TCP, NULL, &port);
exp->helper = nf_conntrack_helper_q931;
+ nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
- (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) &&
+ nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* Need NAT */
- ret = nat_callforwarding(skb, ct, ctinfo,
- protoff, data, dataoff,
- taddr, port, exp);
+ ret = nathook->nat_callforwarding(skb, ct, ctinfo,
+ protoff, data, dataoff,
+ taddr, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_q931: expect Call Forwarding ");
@@ -844,12 +793,12 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, int dataoff,
Setup_UUIE *setup)
{
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret;
int i;
__be16 port;
union nf_inet_addr addr;
- typeof(set_h225_addr_hook) set_h225_addr;
pr_debug("nf_ct_q931: Setup\n");
@@ -860,9 +809,9 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
return -1;
}
- set_h225_addr = rcu_dereference(set_h225_addr_hook);
+ nathook = rcu_dereference(nfct_h323_nat_hook);
if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
- (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK &&
get_h225_addr(ct, *data, &setup->destCallSignalAddress,
&addr, &port) &&
@@ -870,16 +819,16 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
&addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
- ret = set_h225_addr(skb, protoff, data, dataoff,
- &setup->destCallSignalAddress,
- &ct->tuplehash[!dir].tuple.src.u3,
- ct->tuplehash[!dir].tuple.src.u.tcp.port);
+ ret = nathook->set_h225_addr(skb, protoff, data, dataoff,
+ &setup->destCallSignalAddress,
+ &ct->tuplehash[!dir].tuple.src.u3,
+ ct->tuplehash[!dir].tuple.src.u.tcp.port);
if (ret < 0)
return -1;
}
if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
- (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK &&
get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
&addr, &port) &&
@@ -887,10 +836,10 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
&addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
- ret = set_h225_addr(skb, protoff, data, dataoff,
- &setup->sourceCallSignalAddress,
- &ct->tuplehash[!dir].tuple.dst.u3,
- ct->tuplehash[!dir].tuple.dst.u.tcp.port);
+ ret = nathook->set_h225_addr(skb, protoff, data, dataoff,
+ &setup->sourceCallSignalAddress,
+ &ct->tuplehash[!dir].tuple.dst.u3,
+ ct->tuplehash[!dir].tuple.dst.u.tcp.port);
if (ret < 0)
return -1;
}
@@ -1249,13 +1198,13 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
TransportAddress *taddr, int count)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
int i;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
- typeof(nat_q931_hook) nat_q931;
/* Look for the first related address */
for (i = 0; i < count; i++) {
@@ -1279,11 +1228,11 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
exp->helper = nf_conntrack_helper_q931;
exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */
- nat_q931 = rcu_dereference(nat_q931_hook);
- if (nat_q931 && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) { /* Need NAT */
- ret = nat_q931(skb, ct, ctinfo, protoff, data,
- taddr, i, port, exp);
+ ret = nathook->nat_q931(skb, ct, ctinfo, protoff, data,
+ taddr, i, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
@@ -1305,15 +1254,15 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
unsigned int protoff,
unsigned char **data, GatekeeperRequest *grq)
{
- typeof(set_ras_addr_hook) set_ras_addr;
+ const struct nfct_h323_nat_hooks *nathook;
pr_debug("nf_ct_ras: GRQ\n");
- set_ras_addr = rcu_dereference(set_ras_addr_hook);
- if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) /* NATed */
- return set_ras_addr(skb, ct, ctinfo, protoff, data,
- &grq->rasAddress, 1);
+ return nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
+ &grq->rasAddress, 1);
return 0;
}
@@ -1367,8 +1316,8 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, RegistrationRequest *rrq)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
+ const struct nfct_h323_nat_hooks *nathook;
int ret;
- typeof(set_ras_addr_hook) set_ras_addr;
pr_debug("nf_ct_ras: RRQ\n");
@@ -1378,12 +1327,12 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
if (ret < 0)
return -1;
- set_ras_addr = rcu_dereference(set_ras_addr_hook);
- if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
- ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
- rrq->rasAddress.item,
- rrq->rasAddress.count);
+ ret = nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
+ rrq->rasAddress.item,
+ rrq->rasAddress.count);
if (ret < 0)
return -1;
}
@@ -1403,19 +1352,19 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, RegistrationConfirm *rcf)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret;
struct nf_conntrack_expect *exp;
- typeof(set_sig_addr_hook) set_sig_addr;
pr_debug("nf_ct_ras: RCF\n");
- set_sig_addr = rcu_dereference(set_sig_addr_hook);
- if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
- ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
- rcf->callSignalAddress.item,
- rcf->callSignalAddress.count);
+ ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data,
+ rcf->callSignalAddress.item,
+ rcf->callSignalAddress.count);
if (ret < 0)
return -1;
}
@@ -1454,18 +1403,18 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, UnregistrationRequest *urq)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret;
- typeof(set_sig_addr_hook) set_sig_addr;
pr_debug("nf_ct_ras: URQ\n");
- set_sig_addr = rcu_dereference(set_sig_addr_hook);
- if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
- ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
- urq->callSignalAddress.item,
- urq->callSignalAddress.count);
+ ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data,
+ urq->callSignalAddress.item,
+ urq->callSignalAddress.count);
if (ret < 0)
return -1;
}
@@ -1487,39 +1436,42 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, AdmissionRequest *arq)
{
const struct nf_ct_h323_master *info = nfct_help_data(ct);
+ const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
__be16 port;
union nf_inet_addr addr;
- typeof(set_h225_addr_hook) set_h225_addr;
pr_debug("nf_ct_ras: ARQ\n");
- set_h225_addr = rcu_dereference(set_h225_addr_hook);
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (!nathook)
+ return 0;
+
if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
get_h225_addr(ct, *data, &arq->destCallSignalAddress,
&addr, &port) &&
!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
port == info->sig_port[dir] &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
- set_h225_addr && ct->status & IPS_NAT_MASK) {
+ ct->status & IPS_NAT_MASK) {
/* Answering ARQ */
- return set_h225_addr(skb, protoff, data, 0,
- &arq->destCallSignalAddress,
- &ct->tuplehash[!dir].tuple.dst.u3,
- info->sig_port[!dir]);
+ return nathook->set_h225_addr(skb, protoff, data, 0,
+ &arq->destCallSignalAddress,
+ &ct->tuplehash[!dir].tuple.dst.u3,
+ info->sig_port[!dir]);
}
if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
&addr, &port) &&
!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
- set_h225_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* Calling ARQ */
- return set_h225_addr(skb, protoff, data, 0,
- &arq->srcCallSignalAddress,
- &ct->tuplehash[!dir].tuple.dst.u3,
- port);
+ return nathook->set_h225_addr(skb, protoff, data, 0,
+ &arq->srcCallSignalAddress,
+ &ct->tuplehash[!dir].tuple.dst.u3,
+ port);
}
return 0;
@@ -1535,7 +1487,6 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
- typeof(set_sig_addr_hook) set_sig_addr;
pr_debug("nf_ct_ras: ACF\n");
@@ -1544,12 +1495,15 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
return 0;
if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
+ const struct nfct_h323_nat_hooks *nathook;
+
/* Answering ACF */
- set_sig_addr = rcu_dereference(set_sig_addr_hook);
- if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK)
- return set_sig_addr(skb, ct, ctinfo, protoff, data,
- &acf->destCallSignalAddress, 1);
+ return nathook->set_sig_addr(skb, ct, ctinfo, protoff,
+ data,
+ &acf->destCallSignalAddress, 1);
return 0;
}
@@ -1578,15 +1532,15 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
unsigned int protoff,
unsigned char **data, LocationRequest *lrq)
{
- typeof(set_ras_addr_hook) set_ras_addr;
+ const struct nfct_h323_nat_hooks *nathook;
pr_debug("nf_ct_ras: LRQ\n");
- set_ras_addr = rcu_dereference(set_ras_addr_hook);
- if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK)
- return set_ras_addr(skb, ct, ctinfo, protoff, data,
- &lrq->replyAddress, 1);
+ return nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
+ &lrq->replyAddress, 1);
return 0;
}
@@ -1634,27 +1588,22 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
unsigned int protoff,
unsigned char **data, InfoRequestResponse *irr)
{
+ const struct nfct_h323_nat_hooks *nathook;
int ret;
- typeof(set_ras_addr_hook) set_ras_addr;
- typeof(set_sig_addr_hook) set_sig_addr;
pr_debug("nf_ct_ras: IRR\n");
- set_ras_addr = rcu_dereference(set_ras_addr_hook);
- if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+ nathook = rcu_dereference(nfct_h323_nat_hook);
+ if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
- ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
- &irr->rasAddress, 1);
+ ret = nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
+ &irr->rasAddress, 1);
if (ret < 0)
return -1;
- }
- set_sig_addr = rcu_dereference(set_sig_addr_hook);
- if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
- ct->status & IPS_NAT_MASK) {
- ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
- irr->callSignalAddress.item,
- irr->callSignalAddress.count);
+ ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data,
+ irr->callSignalAddress.item,
+ irr->callSignalAddress.count);
if (ret < 0)
return -1;
}
@@ -1837,17 +1786,6 @@ err1:
module_init(nf_conntrack_h323_init);
module_exit(nf_conntrack_h323_fini);
-EXPORT_SYMBOL_GPL(get_h225_addr);
-EXPORT_SYMBOL_GPL(set_h245_addr_hook);
-EXPORT_SYMBOL_GPL(set_h225_addr_hook);
-EXPORT_SYMBOL_GPL(set_sig_addr_hook);
-EXPORT_SYMBOL_GPL(set_ras_addr_hook);
-EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook);
-EXPORT_SYMBOL_GPL(nat_t120_hook);
-EXPORT_SYMBOL_GPL(nat_h245_hook);
-EXPORT_SYMBOL_GPL(nat_callforwarding_hook);
-EXPORT_SYMBOL_GPL(nat_q931_hook);
-
MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
MODULE_DESCRIPTION("H.323 connection tracking helper");
MODULE_LICENSE("GPL");
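
The H.323 change collapses nine individually exported NAT hook pointers into one RCU-published struct (nfct_h323_nat_hook), so a caller does a single rcu_dereference() and either sees all NAT callbacks or none. A compact userspace model of that pattern; C11 atomics stand in here for rcu_assign_pointer()/rcu_dereference(), and the struct contents are invented:

/* Sketch: consolidating many optional callbacks behind one published
 * struct pointer, modelled on nfct_h323_nat_hook above. */
#include <stdatomic.h>
#include <stdio.h>

struct nat_hooks {
	int (*nat_q931)(int port);
	int (*nat_h245)(int port);
};

/* Published hook table: NULL means "no NAT module loaded". */
static _Atomic(const struct nat_hooks *) nat_hook_table;

static int do_q931(int port)
{
	const struct nat_hooks *hooks =
		atomic_load_explicit(&nat_hook_table, memory_order_acquire);

	if (!hooks)			/* conntrack-only path */
		return 0;
	return hooks->nat_q931(port);	/* NAT path: all callbacks guaranteed present */
}

static int demo_nat_q931(int port) { printf("NAT q931 on %d\n", port); return 1; }
static int demo_nat_h245(int port) { printf("NAT h245 on %d\n", port); return 1; }

static const struct nat_hooks demo_hooks = {
	.nat_q931 = demo_nat_q931,
	.nat_h245 = demo_nat_h245,
};

int main(void)
{
	do_q931(1720);						/* no hooks yet: conntrack only */
	atomic_store_explicit(&nat_hook_table, &demo_hooks,
			      memory_order_release);		/* "module" registers its hooks */
	do_q931(1720);						/* now takes the NAT path */
	return 0;
}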
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index c12a87ebc3ee..e96b32221444 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -165,7 +165,7 @@ nf_nat_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
if (!nat) {
snprintf(mod_name, sizeof(mod_name), "%s", h->nat_mod_name);
rcu_read_unlock();
- request_module(mod_name);
+ request_module("%s", mod_name);
rcu_read_lock();
nat = nf_conntrack_nat_helper_find(mod_name);
@@ -249,7 +249,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
if (tmpl != NULL) {
help = nfct_help(tmpl);
if (help != NULL) {
- helper = help->helper;
+ helper = rcu_dereference(help->helper);
set_bit(IPS_HELPER_BIT, &ct->status);
}
}
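
request_module("%s", mod_name) above passes the helper name as data rather than as the format string, so a name containing '%' cannot be interpreted as a conversion specifier. The same rule applies to any printf-style interface; a short userspace illustration:

/* Sketch: never feed variable data as the format string.
 * The kernel fix above does the same for request_module(). */
#include <stdio.h>

static void log_module_request(const char *mod_name)
{
	/* BAD (left commented out): printf(mod_name);
	 * a name containing "%s" or "%n" would be parsed as conversions. */
	printf("%s\n", mod_name);	/* GOOD: the name is plain data */
}

int main(void)
{
	log_module_request("nf_nat_ftp");
	log_module_request("evil-%s-%n-name");	/* printed literally, no crash */
	return 0;
}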
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index f5905b5201a7..f8dd4ed8dc60 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2005,7 +2005,7 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
}
if (help) {
- if (help->helper == helper) {
+ if (rcu_access_pointer(help->helper) == helper) {
/* update private helper data if allowed. */
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
@@ -3413,12 +3413,17 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
{
+ struct nf_conntrack_helper *helper;
const struct nf_conn_help *m_help;
const char *name = data;
m_help = nfct_help(exp->master);
- return strcmp(m_help->helper->name, name) == 0;
+ helper = rcu_dereference(m_help->helper);
+ if (!helper)
+ return false;
+
+ return strcmp(helper->name, name) == 0;
}
static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
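
ctnetlink now loads help->helper with the accessor that matches the use: rcu_access_pointer() when the value is only compared against another pointer, rcu_dereference() when fields behind it are read. A sketch of that distinction; the demo uses C11 atomics so it runs in userspace, with the kernel macros noted in comments:

/* Sketch: "compare only" vs "actually read through" an RCU-style pointer,
 * mirroring the rcu_access_pointer()/rcu_dereference() split above. */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct helper {
	char name[16];
};

static _Atomic(const struct helper *) current_helper;

/* rcu_access_pointer()-style: no dereference, a relaxed load is enough. */
static int helper_is(const struct helper *candidate)
{
	return atomic_load_explicit(&current_helper, memory_order_relaxed) == candidate;
}

/* rcu_dereference()-style: fields are read, so order the load. */
static int helper_name_matches(const char *name)
{
	const struct helper *h =
		atomic_load_explicit(&current_helper, memory_order_acquire);

	if (!h)
		return 0;	/* helper may have been unset concurrently */
	return strcmp(h->name, name) == 0;
}

int main(void)
{
	static const struct helper sip = { .name = "sip" };

	atomic_store_explicit(&current_helper, &sip, memory_order_release);
	printf("is sip helper: %d\n", helper_is(&sip));
	printf("name matches \"sip\": %d\n", helper_name_matches("sip"));
	return 0;
}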
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index f3fa367b455f..4c679638df06 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -45,7 +45,7 @@ MODULE_ALIAS_NFCT_HELPER("pptp");
static DEFINE_SPINLOCK(nf_pptp_lock);
-const struct nf_nat_pptp_hook *nf_nat_pptp_hook;
+const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook;
EXPORT_SYMBOL_GPL(nf_nat_pptp_hook);
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index b83dc9bf0a5d..daf06f71d31c 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -60,7 +60,7 @@ module_param(sip_external_media, int, 0600);
MODULE_PARM_DESC(sip_external_media, "Expect Media streams between external "
"endpoints (default 0)");
-const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
static int string_len(const struct nf_conn *ct, const char *dptr,
@@ -1229,6 +1229,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
struct nf_conntrack_expect *exp;
union nf_inet_addr *saddr, daddr;
const struct nf_nat_sip_hooks *hooks;
+ struct nf_conntrack_helper *helper;
__be16 port;
u8 proto;
unsigned int expires = 0;
@@ -1289,10 +1290,14 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
if (sip_direct_signalling)
saddr = &ct->tuplehash[!dir].tuple.src.u3;
+ helper = rcu_dereference(nfct_help(ct)->helper);
+ if (!helper)
+ return NF_DROP;
+
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
saddr, &daddr, proto, NULL, &port);
exp->timeout.expires = sip_timeout * HZ;
- exp->helper = nfct_help(ct)->helper;
+ exp->helper = helper;
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
hooks = rcu_dereference(nf_nat_sip_hooks);
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
index 0f828d05ea60..0cc584d3dbb1 100644
--- a/net/netfilter/nf_conntrack_timeout.c
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -22,15 +22,21 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
-const struct nf_ct_timeout_hooks *nf_ct_timeout_hook __read_mostly;
+const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_hook);
static int untimeout(struct nf_conn *ct, void *timeout)
{
struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
- if (timeout_ext && (!timeout || timeout_ext->timeout == timeout))
- RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+ if (timeout_ext) {
+ const struct nf_ct_timeout *t;
+
+ t = rcu_access_pointer(timeout_ext->timeout);
+
+ if (!timeout || t == timeout)
+ RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+ }
/* We are not intended to delete this conntrack. */
return 0;
@@ -127,7 +133,11 @@ void nf_ct_destroy_timeout(struct nf_conn *ct)
if (h) {
timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext) {
- h->timeout_put(timeout_ext->timeout);
+ struct nf_ct_timeout *t;
+
+ t = rcu_dereference(timeout_ext->timeout);
+ if (t)
+ h->timeout_put(t);
RCU_INIT_POINTER(timeout_ext->timeout, NULL);
}
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index f2def06d1070..765ac779bfc8 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -53,14 +53,14 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
struct flow_offload *flow;
- if (unlikely(nf_ct_is_dying(ct) ||
- !refcount_inc_not_zero(&ct->ct_general.use)))
+ if (unlikely(nf_ct_is_dying(ct)))
return NULL;
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
if (!flow)
- goto err_ct_refcnt;
+ return NULL;
+ refcount_inc(&ct->ct_general.use);
flow->ct = ct;
flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
@@ -72,11 +72,6 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
__set_bit(NF_FLOW_DNAT, &flow->flags);
return flow;
-
-err_ct_refcnt:
- nf_ct_put(ct);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
@@ -614,14 +609,74 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
+static int nf_flow_table_init_net(struct net *net)
+{
+ net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
+ return net->ft.stat ? 0 : -ENOMEM;
+}
+
+static void nf_flow_table_fini_net(struct net *net)
+{
+ free_percpu(net->ft.stat);
+}
+
+static int nf_flow_table_pernet_init(struct net *net)
+{
+ int ret;
+
+ ret = nf_flow_table_init_net(net);
+ if (ret < 0)
+ return ret;
+
+ ret = nf_flow_table_init_proc(net);
+ if (ret < 0)
+ goto out_proc;
+
+ return 0;
+
+out_proc:
+ nf_flow_table_fini_net(net);
+ return ret;
+}
+
+static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
+{
+ struct net *net;
+
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ nf_flow_table_fini_proc(net);
+ nf_flow_table_fini_net(net);
+ }
+}
+
+static struct pernet_operations nf_flow_table_net_ops = {
+ .init = nf_flow_table_pernet_init,
+ .exit_batch = nf_flow_table_pernet_exit,
+};
+
static int __init nf_flow_table_module_init(void)
{
- return nf_flow_table_offload_init();
+ int ret;
+
+ ret = register_pernet_subsys(&nf_flow_table_net_ops);
+ if (ret < 0)
+ return ret;
+
+ ret = nf_flow_table_offload_init();
+ if (ret)
+ goto out_offload;
+
+ return 0;
+
+out_offload:
+ unregister_pernet_subsys(&nf_flow_table_net_ops);
+ return ret;
}
static void __exit nf_flow_table_module_exit(void)
{
nf_flow_table_offload_exit();
+ unregister_pernet_subsys(&nf_flow_table_net_ops);
}
module_init(nf_flow_table_module_init);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 11b6e1942092..103b6cbf257f 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -967,17 +967,22 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
static void flow_offload_work_handler(struct work_struct *work)
{
struct flow_offload_work *offload;
+ struct net *net;
offload = container_of(work, struct flow_offload_work, work);
+ net = read_pnet(&offload->flowtable->net);
switch (offload->cmd) {
case FLOW_CLS_REPLACE:
flow_offload_work_add(offload);
+ NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_add);
break;
case FLOW_CLS_DESTROY:
flow_offload_work_del(offload);
+ NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_del);
break;
case FLOW_CLS_STATS:
flow_offload_work_stats(offload);
+ NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_stats);
break;
default:
WARN_ON_ONCE(1);
@@ -989,12 +994,18 @@ static void flow_offload_work_handler(struct work_struct *work)
static void flow_offload_queue_work(struct flow_offload_work *offload)
{
- if (offload->cmd == FLOW_CLS_REPLACE)
+ struct net *net = read_pnet(&offload->flowtable->net);
+
+ if (offload->cmd == FLOW_CLS_REPLACE) {
+ NF_FLOW_TABLE_STAT_INC(net, count_wq_add);
queue_work(nf_flow_offload_add_wq, &offload->work);
- else if (offload->cmd == FLOW_CLS_DESTROY)
+ } else if (offload->cmd == FLOW_CLS_DESTROY) {
+ NF_FLOW_TABLE_STAT_INC(net, count_wq_del);
queue_work(nf_flow_offload_del_wq, &offload->work);
- else
+ } else {
+ NF_FLOW_TABLE_STAT_INC(net, count_wq_stats);
queue_work(nf_flow_offload_stats_wq, &offload->work);
+ }
}
static struct flow_offload_work *
diff --git a/net/netfilter/nf_flow_table_procfs.c b/net/netfilter/nf_flow_table_procfs.c
new file mode 100644
index 000000000000..159b033a43e6
--- /dev/null
+++ b/net/netfilter/nf_flow_table_procfs.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <net/netfilter/nf_flow_table.h>
+
+static void *nf_flow_table_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct net *net = seq_file_net(seq);
+ int cpu;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return per_cpu_ptr(net->ft.stat, cpu);
+ }
+
+ return NULL;
+}
+
+static void *nf_flow_table_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct net *net = seq_file_net(seq);
+ int cpu;
+
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return per_cpu_ptr(net->ft.stat, cpu);
+ }
+ (*pos)++;
+ return NULL;
+}
+
+static void nf_flow_table_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int nf_flow_table_cpu_seq_show(struct seq_file *seq, void *v)
+{
+ const struct nf_flow_table_stat *st = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "wq_add wq_del wq_stats\n");
+ return 0;
+ }
+
+ seq_printf(seq, "%8d %8d %8d\n",
+ st->count_wq_add,
+ st->count_wq_del,
+ st->count_wq_stats
+ );
+ return 0;
+}
+
+static const struct seq_operations nf_flow_table_cpu_seq_ops = {
+ .start = nf_flow_table_cpu_seq_start,
+ .next = nf_flow_table_cpu_seq_next,
+ .stop = nf_flow_table_cpu_seq_stop,
+ .show = nf_flow_table_cpu_seq_show,
+};
+
+int nf_flow_table_init_proc(struct net *net)
+{
+ struct proc_dir_entry *pde;
+
+ pde = proc_create_net("nf_flowtable", 0444, net->proc_net_stat,
+ &nf_flow_table_cpu_seq_ops,
+ sizeof(struct seq_net_private));
+ return pde ? 0 : -ENOMEM;
+}
+
+void nf_flow_table_fini_proc(struct net *net)
+{
+ remove_proc_entry("nf_flowtable", net->proc_net_stat);
+}
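
The counters behind this file are incremented in nf_flow_table_offload.c above when work is queued and decremented when the handler finishes, so each row approximates one CPU's backlog of pending flow-offload work. A minimal user-space sketch (illustration only, not part of the patch; the /proc/net/stat/nf_flowtable path follows from the proc_create_net() call above) that sums the per-CPU rows:

/* Read the per-CPU rows and print summed totals. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/stat/nf_flowtable", "r");
	char header[128];
	unsigned long add, del, stats;
	unsigned long sum_add = 0, sum_del = 0, sum_stats = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* first line is the "wq_add wq_del wq_stats" header */
	if (!fgets(header, sizeof(header), f)) {
		fclose(f);
		return 1;
	}
	/* every following line is one possible CPU's counters */
	while (fscanf(f, "%lu %lu %lu", &add, &del, &stats) == 3) {
		sum_add += add;
		sum_del += del;
		sum_stats += stats;
	}
	fclose(f);
	printf("wq_add=%lu wq_del=%lu wq_stats=%lu\n",
	       sum_add, sum_del, sum_stats);
	return 0;
}
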
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 2f7c477fc9e7..c24b1240908f 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -626,7 +626,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
nfgenmsg = nlmsg_data(nlh);
skb_pull(skb, msglen);
/* Work around old nft using host byte order */
- if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
+ if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES)
res_id = NFNL_SUBSYS_NFTABLES;
else
res_id = ntohs(nfgenmsg->res_id);
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 5c622f55c9d6..97248963a7d3 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -96,11 +96,13 @@ static int
nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
{
struct nf_conn_help *help = nfct_help(ct);
+ const struct nf_conntrack_helper *helper;
if (attr == NULL)
return -EINVAL;
- if (help->helper->data_len == 0)
+ helper = rcu_dereference(help->helper);
+ if (!helper || helper->data_len == 0)
return -EINVAL;
nla_memcpy(help->data, attr, sizeof(help->data));
@@ -111,9 +113,11 @@ static int
nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
{
const struct nf_conn_help *help = nfct_help(ct);
+ const struct nf_conntrack_helper *helper;
- if (help->helper->data_len &&
- nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data))
+ helper = rcu_dereference(help->helper);
+ if (helper && helper->data_len &&
+ nla_put(skb, CTA_HELP_INFO, helper->data_len, &help->data))
goto nla_put_failure;
return 0;
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index d77609144b26..f952a80275a8 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -44,7 +44,8 @@ void nft_byteorder_eval(const struct nft_expr *expr,
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 8; i++) {
src64 = nft_reg_load64(&src[i]);
- nft_reg_store64(&dst[i], be64_to_cpu(src64));
+ nft_reg_store64(&dst[i],
+ be64_to_cpu((__force __be64)src64));
}
break;
case NFT_BYTEORDER_HTON:
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 6528f76ca29e..777f09e4dc60 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -125,13 +125,13 @@ static void nft_payload_n2h(union nft_cmp_offload_data *data,
{
switch (len) {
case 2:
- data->val16 = ntohs(*((u16 *)val));
+ data->val16 = ntohs(*((__be16 *)val));
break;
case 4:
- data->val32 = ntohl(*((u32 *)val));
+ data->val32 = ntohl(*((__be32 *)val));
break;
case 8:
- data->val64 = be64_to_cpu(*((u64 *)val));
+ data->val64 = be64_to_cpu(*((__be64 *)val));
break;
default:
WARN_ON_ONCE(1);
@@ -197,6 +197,18 @@ static const struct nft_expr_ops nft_cmp_ops = {
.offload = nft_cmp_offload,
};
+/* Calculate the mask for the nft_cmp_fast expression. On big endian the
+ * mask needs to include the *upper* bytes when interpreting that data as
+ * something smaller than the full u32, therefore a cpu_to_le32 is done.
+ */
+static u32 nft_cmp_fast_mask(unsigned int len)
+{
+ __le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
+ data) * BITS_PER_BYTE - len));
+
+ return (__force u32)mask;
+}
+
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
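
To see what nft_cmp_fast_mask() yields, consider a 16-bit compare: the register holds packet bytes in memory order, so on little endian the low 16 bits must be kept, while on big endian the value sits in the two high-order bytes of the u32 and the cpu_to_le32() swap moves the mask onto them. A stand-alone sketch (illustration only, with the 4-byte register width hard-coded) of the same computation:

/* Reproduce the fast-compare mask for a given bit length. */
#include <stdio.h>
#include <stdint.h>

static uint32_t cmp_fast_mask(unsigned int len_bits)
{
	uint32_t host_mask = ~0U >> (32 - len_bits);	/* 0x0000ffff for len 16 */

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* equivalent of cpu_to_le32(): mask the first bytes in memory */
	return __builtin_bswap32(host_mask);		/* 0xffff0000 */
#else
	/* little endian: memory order already matches host order */
	return host_mask;
#endif
}

int main(void)
{
	printf("16-bit mask: 0x%08x\n", cmp_fast_mask(16));
	return 0;
}
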
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index d8e1614918a1..b04995c3e17f 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -204,12 +204,12 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
case NFT_CT_SRC_IP:
if (nf_ct_l3num(ct) != NFPROTO_IPV4)
goto err;
- *dest = tuple->src.u3.ip;
+ *dest = (__force __u32)tuple->src.u3.ip;
return;
case NFT_CT_DST_IP:
if (nf_ct_l3num(ct) != NFPROTO_IPV4)
goto err;
- *dest = tuple->dst.u3.ip;
+ *dest = (__force __u32)tuple->dst.u3.ip;
return;
case NFT_CT_SRC_IP6:
if (nf_ct_l3num(ct) != NFPROTO_IPV6)
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 22c3e05b52db..a67ea9c3ae57 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -266,7 +266,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
switch (priv->len) {
case 2:
- old.v16 = get_unaligned((u16 *)(opt + offset));
+ old.v16 = (__force __be16)get_unaligned((u16 *)(opt + offset));
new.v16 = (__force __be16)nft_reg_load16(
&regs->data[priv->sreg]);
@@ -281,18 +281,18 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
if (old.v16 == new.v16)
return;
- put_unaligned(new.v16, (u16*)(opt + offset));
+ put_unaligned(new.v16, (__be16*)(opt + offset));
inet_proto_csum_replace2(&tcph->check, pkt->skb,
old.v16, new.v16, false);
break;
case 4:
- new.v32 = regs->data[priv->sreg];
- old.v32 = get_unaligned((u32 *)(opt + offset));
+ new.v32 = nft_reg_load_be32(&regs->data[priv->sreg]);
+ old.v32 = (__force __be32)get_unaligned((u32 *)(opt + offset));
if (old.v32 == new.v32)
return;
- put_unaligned(new.v32, (u32*)(opt + offset));
+ put_unaligned(new.v32, (__be32*)(opt + offset));
inet_proto_csum_replace4(&tcph->check, pkt->skb,
old.v32, new.v32, false);
break;
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 5eed18f90b02..0053a697c931 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -99,7 +99,7 @@ static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_OSF_FLAGS, ntohl(priv->flags)))
+ if (nla_put_u32(skb, NFTA_OSF_FLAGS, ntohl((__force __be32)priv->flags)))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index e7ae5914971e..96081ac8d2b4 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -21,7 +21,7 @@ struct nft_bitmap_elem {
* the element state in the current and the future generation.
*
* An element can be in three states. The generation cursor is represented using
- * the ^ character, note that this cursor shifts on every succesful transaction.
+ * the ^ character, note that this cursor shifts on every successful transaction.
* If no transaction is going on, we observe all elements are in the following
* state:
*
@@ -39,7 +39,7 @@ struct nft_bitmap_elem {
* 10 = this element is active in the current generation and it becomes inactive
* ^ in the next one. This happens when the element is deactivated but commit
* path has not yet been executed yet, so removal is still pending. On
- * transation abortion, the next generation bit is reset to go back to
+ * transaction abortion, the next generation bit is reset to go back to
* restore its previous state.
*/
struct nft_bitmap {
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 05ae5a338b6f..a7de29137618 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -163,7 +163,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- priv->key = ntohl(nla_get_u32(tb[NFTA_SOCKET_KEY]));
+ priv->key = ntohl(nla_get_be32(tb[NFTA_SOCKET_KEY]));
switch(priv->key) {
case NFT_SOCKET_TRANSPARENT:
case NFT_SOCKET_WILDCARD:
@@ -179,7 +179,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
if (!tb[NFTA_SOCKET_LEVEL])
return -EINVAL;
- level = ntohl(nla_get_u32(tb[NFTA_SOCKET_LEVEL]));
+ level = ntohl(nla_get_be32(tb[NFTA_SOCKET_LEVEL]));
if (level > 255)
return -EOPNOTSUPP;
@@ -202,12 +202,12 @@ static int nft_socket_dump(struct sk_buff *skb,
{
const struct nft_socket *priv = nft_expr_priv(expr);
- if (nla_put_u32(skb, NFTA_SOCKET_KEY, htonl(priv->key)))
+ if (nla_put_be32(skb, NFTA_SOCKET_KEY, htonl(priv->key)))
return -1;
if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
return -1;
if (priv->key == NFT_SOCKET_CGROUPV2 &&
- nla_put_u32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
+ nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
return -1;
return 0;
}
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index 801f013971df..68b2eed742df 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -52,11 +52,11 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
if (priv->sreg_addr)
- taddr = regs->data[priv->sreg_addr];
+ taddr = nft_reg_load_be32(&regs->data[priv->sreg_addr]);
taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
if (priv->sreg_port)
- tport = nft_reg_load16(&regs->data[priv->sreg_port]);
+ tport = nft_reg_load_be16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
@@ -124,7 +124,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
if (priv->sreg_port)
- tport = nft_reg_load16(&regs->data[priv->sreg_port]);
+ tport = nft_reg_load_be16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index d0f9b1d51b0e..5edaaded706d 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -383,8 +383,9 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
struct ip_tunnel_info *info,
struct nft_tunnel_opts *opts)
{
- int err, rem, type = 0;
struct nlattr *nla;
+ __be16 type = 0;
+ int err, rem;
err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
nft_tunnel_opts_policy, NULL);
diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
index becb88fa4e9b..1c5343c936a8 100644
--- a/net/netfilter/nft_xfrm.c
+++ b/net/netfilter/nft_xfrm.c
@@ -51,7 +51,7 @@ static int nft_xfrm_get_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- priv->key = ntohl(nla_get_u32(tb[NFTA_XFRM_KEY]));
+ priv->key = ntohl(nla_get_be32(tb[NFTA_XFRM_KEY]));
switch (priv->key) {
case NFT_XFRM_KEY_REQID:
case NFT_XFRM_KEY_SPI:
@@ -134,13 +134,13 @@ static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
WARN_ON_ONCE(1);
break;
case NFT_XFRM_KEY_DADDR_IP4:
- *dest = state->id.daddr.a4;
+ *dest = (__force __u32)state->id.daddr.a4;
return;
case NFT_XFRM_KEY_DADDR_IP6:
memcpy(dest, &state->id.daddr.in6, sizeof(struct in6_addr));
return;
case NFT_XFRM_KEY_SADDR_IP4:
- *dest = state->props.saddr.a4;
+ *dest = (__force __u32)state->props.saddr.a4;
return;
case NFT_XFRM_KEY_SADDR_IP6:
memcpy(dest, &state->props.saddr.in6, sizeof(struct in6_addr));
@@ -149,7 +149,7 @@ static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
*dest = state->props.reqid;
return;
case NFT_XFRM_KEY_SPI:
- *dest = state->id.spi;
+ *dest = (__force __u32)state->id.spi;
return;
}
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 267757b0392a..2be2f7a7b60f 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
return -ENOMEM;
}
- help->helper = helper;
+ rcu_assign_pointer(help->helper, helper);
return 0;
}
@@ -136,6 +136,21 @@ static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
}
}
+static void xt_ct_put_helper(struct nf_conn_help *help)
+{
+ struct nf_conntrack_helper *helper;
+
+ if (!help)
+ return;
+
+ /* not yet exposed to other cpus, or ruleset
+ * already detached (post-replacement).
+ */
+ helper = rcu_dereference_raw(help->helper);
+ if (helper)
+ nf_conntrack_helper_put(helper);
+}
+
static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
@@ -207,8 +222,7 @@ out:
err4:
help = nfct_help(ct);
- if (help)
- nf_conntrack_helper_put(help->helper);
+ xt_ct_put_helper(help);
err3:
nf_ct_tmpl_free(ct);
err2:
@@ -270,8 +284,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
if (ct) {
help = nfct_help(ct);
- if (help)
- nf_conntrack_helper_put(help->helper);
+ xt_ct_put_helper(help);
nf_ct_netns_put(par->net, par->family);
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index eababc354ff1..cfa44515ab72 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -24,6 +24,8 @@ MODULE_ALIAS("ip6t_DSCP");
MODULE_ALIAS("ipt_TOS");
MODULE_ALIAS("ip6t_TOS");
+#define XT_DSCP_ECN_MASK 3u
+
static unsigned int
dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
@@ -34,8 +36,7 @@ dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
return NF_DROP;
- ipv4_change_dsfield(ip_hdr(skb),
- (__force __u8)(~XT_DSCP_MASK),
+ ipv4_change_dsfield(ip_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
}
@@ -52,8 +53,7 @@ dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
return NF_DROP;
- ipv6_change_dsfield(ipv6_hdr(skb),
- (__force __u8)(~XT_DSCP_MASK),
+ ipv6_change_dsfield(ipv6_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
}
return XT_CONTINUE;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 122db9fbb9f4..116a885adb3c 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -239,8 +239,8 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
oldlen = ipv6h->payload_len;
newlen = htons(ntohs(oldlen) + ret);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(csum_sub(skb->csum, oldlen),
- newlen);
+ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)oldlen),
+ (__force __wsum)newlen);
ipv6h->payload_len = newlen;
}
return XT_CONTINUE;
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 459d0696c91a..e4bea1d346cf 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -74,18 +74,10 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
-
- pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
- iph->protocol, &iph->daddr, ntohs(hp->dest),
- &laddr, ntohs(lport), skb->mark);
-
nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
- pr_debug("no socket, dropping: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
- iph->protocol, &iph->saddr, ntohs(hp->source),
- &iph->daddr, ntohs(hp->dest), skb->mark);
return NF_DROP;
}
@@ -122,16 +114,12 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
int tproto;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
- if (tproto < 0) {
- pr_debug("unable to find transport header in IPv6 packet, dropping\n");
+ if (tproto < 0)
return NF_DROP;
- }
hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
- if (hp == NULL) {
- pr_debug("unable to grab transport header contents in IPv6 packet, dropping\n");
+ if (!hp)
return NF_DROP;
- }
/* check if there's an ongoing connection on the packet
* addresses, this happens if the redirect already happened
@@ -168,19 +156,10 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
-
- pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
- tproto, &iph->saddr, ntohs(hp->source),
- laddr, ntohs(lport), skb->mark);
-
nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
- pr_debug("no socket, dropping: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
- tproto, &iph->saddr, ntohs(hp->source),
- &iph->daddr, ntohs(hp->dest), skb->mark);
-
return NF_DROP;
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 46fcac75f726..5d04ef80a61d 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -62,10 +62,10 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
key[4] = zone->id;
} else {
const struct iphdr *iph = ip_hdr(skb);
- key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
- iph->daddr : iph->saddr;
- key[0] &= info->mask.ip;
+ key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
+ (__force __u32)iph->daddr : (__force __u32)iph->saddr;
+ key[0] &= (__force __u32)info->mask.ip;
key[1] = zone->id;
}
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index b498dac4e1e0..2f61d5bdce1a 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -115,7 +115,7 @@ error_master_upper_dev_unlink:
error_unlock:
rtnl_unlock();
error_put:
- dev_put_track(vport->dev, &vport->dev_tracker);
+ netdev_put(vport->dev, &vport->dev_tracker);
error_free_vport:
ovs_vport_free(vport);
return ERR_PTR(err);
@@ -137,7 +137,7 @@ static void vport_netdev_free(struct rcu_head *rcu)
{
struct vport *vport = container_of(rcu, struct vport, rcu);
- dev_put_track(vport->dev, &vport->dev_tracker);
+ netdev_put(vport->dev, &vport->dev_tracker);
ovs_vport_free(vport);
}
@@ -173,7 +173,7 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
*/
if (vport->dev->reg_state == NETREG_REGISTERED)
rtnl_delete_link(vport->dev);
- dev_put_track(vport->dev, &vport->dev_tracker);
+ netdev_put(vport->dev, &vport->dev_tracker);
vport->dev = NULL;
rtnl_unlock();
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ca6e92a22923..d08c4728523b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3134,7 +3134,7 @@ static int packet_release(struct socket *sock)
packet_cached_dev_reset(po);
if (po->prot_hook.dev) {
- dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker);
+ netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
@@ -3235,15 +3235,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
WRITE_ONCE(po->num, proto);
po->prot_hook.type = proto;
- dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker);
+ netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
if (unlikely(unlisted)) {
po->prot_hook.dev = NULL;
WRITE_ONCE(po->ifindex, -1);
packet_cached_dev_reset(po);
} else {
- dev_hold_track(dev, &po->prot_hook.dev_tracker,
- GFP_ATOMIC);
+ netdev_hold(dev, &po->prot_hook.dev_tracker,
+ GFP_ATOMIC);
po->prot_hook.dev = dev;
WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
packet_cached_dev_assign(po, dev);
@@ -4167,8 +4167,8 @@ static int packet_notifier(struct notifier_block *this,
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
WRITE_ONCE(po->ifindex, -1);
- dev_put_track(po->prot_hook.dev,
- &po->prot_hook.dev_tracker);
+ netdev_put(po->prot_hook.dev,
+ &po->prot_hook.dev_tracker);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 08aab5c01437..258917a714c8 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -431,7 +431,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
break;
}
- _leave(" = %d [set %hx]", ret, y);
+ _leave(" = %d [set %x]", ret, y);
return ret;
}
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index e013253b10d1..d55afb8d14be 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -277,7 +277,7 @@ static struct nf_flowtable_type flowtable_ct = {
.owner = THIS_MODULE,
};
-static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
+static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
struct tcf_ct_flow_table *ct_ft;
int err = -ENOMEM;
@@ -303,6 +303,7 @@ static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
err = nf_flow_table_init(&ct_ft->nf_ft);
if (err)
goto err_init;
+ write_pnet(&ct_ft->nf_ft.net, net);
__module_get(THIS_MODULE);
out_unlock:
@@ -1391,7 +1392,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
if (err)
goto cleanup;
- err = tcf_ct_flow_table_get(params);
+ err = tcf_ct_flow_table_get(net, params);
if (err)
goto cleanup;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index ebb92fb072ab..a1d70cf86843 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -79,7 +79,7 @@ static void tcf_mirred_release(struct tc_action *a)
/* last reference to action, no need to lock */
dev = rcu_dereference_protected(m->tcfm_dev, 1);
- dev_put_track(dev, &m->tcfm_dev_tracker);
+ netdev_put(dev, &m->tcfm_dev_tracker);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -181,7 +181,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
mac_header_xmit = dev_is_mac_header_xmit(ndev);
odev = rcu_replace_pointer(m->tcfm_dev, ndev,
lockdep_is_held(&m->tcf_lock));
- dev_put_track(odev, &m->tcfm_dev_tracker);
+ netdev_put(odev, &m->tcfm_dev_tracker);
netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
m->tcfm_mac_header_xmit = mac_header_xmit;
}
@@ -402,7 +402,7 @@ static int mirred_device_event(struct notifier_block *unused,
list_for_each_entry(m, &mirred_list, tcfm_list) {
spin_lock_bh(&m->tcf_lock);
if (tcf_mirred_dev_dereference(m) == dev) {
- dev_put_track(dev, &m->tcfm_dev_tracker);
+ netdev_put(dev, &m->tcfm_dev_tracker);
/* Note : no rcu grace period necessary, as
* net_device are already rcu protected.
*/
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ac366c99086f..790d6809be81 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -194,7 +194,7 @@ EXPORT_SYMBOL(register_tcf_proto_ops);
static struct workqueue_struct *tc_filter_wq;
-int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
+void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t;
int rc = -ENOENT;
@@ -214,7 +214,8 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
}
}
write_unlock(&cls_mod_lock);
- return rc;
+
+ WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dcca70144dff..1a1e34480b7e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -63,13 +63,7 @@ struct fl_flow_key {
struct flow_dissector_key_ip ip;
struct flow_dissector_key_ip enc_ip;
struct flow_dissector_key_enc_opts enc_opts;
- union {
- struct flow_dissector_key_ports tp;
- struct {
- struct flow_dissector_key_ports tp_min;
- struct flow_dissector_key_ports tp_max;
- };
- } tp_range;
+ struct flow_dissector_key_ports_range tp_range;
struct flow_dissector_key_ct ct;
struct flow_dissector_key_hash hash;
struct flow_dissector_key_num_of_vlans num_of_vlans;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e3c0e8ea2dbb..bf87b50837a8 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1292,7 +1292,7 @@ err_out5:
if (ops->destroy)
ops->destroy(sch);
err_out3:
- dev_put_track(dev, &sch->dev_tracker);
+ netdev_put(dev, &sch->dev_tracker);
qdisc_free(sch);
err_out2:
module_put(ops->owner);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 02d9f0dfe356..599e26fc2fa8 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -149,7 +149,6 @@ struct cbq_sched_data {
psched_time_t now; /* Cached timestamp */
unsigned int pmask;
- struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer,
started when CBQ has
backlog, but cannot
@@ -441,81 +440,6 @@ static void cbq_overlimit(struct cbq_class *cl)
}
}
-static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
- psched_time_t now)
-{
- struct cbq_class *cl;
- struct cbq_class *cl_prev = q->active[prio];
- psched_time_t sched = now;
-
- if (cl_prev == NULL)
- return 0;
-
- do {
- cl = cl_prev->next_alive;
- if (now - cl->penalized > 0) {
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
- cl->cpriority = cl->priority;
- cl->delayed = 0;
- cbq_activate_class(cl);
-
- if (cl == q->active[prio]) {
- q->active[prio] = cl_prev;
- if (cl == q->active[prio]) {
- q->active[prio] = NULL;
- return 0;
- }
- }
-
- cl = cl_prev->next_alive;
- } else if (sched - cl->penalized > 0)
- sched = cl->penalized;
- } while ((cl_prev = cl) != q->active[prio]);
-
- return sched - now;
-}
-
-static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
-{
- struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
- delay_timer);
- struct Qdisc *sch = q->watchdog.qdisc;
- psched_time_t now;
- psched_tdiff_t delay = 0;
- unsigned int pmask;
-
- now = psched_get_time();
-
- pmask = q->pmask;
- q->pmask = 0;
-
- while (pmask) {
- int prio = ffz(~pmask);
- psched_tdiff_t tmp;
-
- pmask &= ~(1<<prio);
-
- tmp = cbq_undelay_prio(q, prio, now);
- if (tmp > 0) {
- q->pmask |= 1<<prio;
- if (tmp < delay || delay == 0)
- delay = tmp;
- }
- }
-
- if (delay) {
- ktime_t time;
-
- time = 0;
- time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
- hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
- }
-
- __netif_schedule(qdisc_root(sch));
- return HRTIMER_NORESTART;
-}
-
/*
* It is mission critical procedure.
*
@@ -1034,7 +958,6 @@ cbq_reset(struct Qdisc *sch)
q->tx_class = NULL;
q->tx_borrowed = NULL;
qdisc_watchdog_cancel(&q->watchdog);
- hrtimer_cancel(&q->delay_timer);
q->toplevel = TC_CBQ_MAXLEVEL;
q->now = psched_get_time();
@@ -1162,8 +1085,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
int err;
qdisc_watchdog_init(&q->watchdog, sch);
- hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- q->delay_timer.function = cbq_undelay;
err = cbq_opt_parse(tb, opt, extack);
if (err < 0)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index dba0b3e24af5..cc6eabee2830 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -541,7 +541,7 @@ static void dev_watchdog(struct timer_list *t)
spin_unlock(&dev->tx_global_lock);
if (release)
- dev_put_track(dev, &dev->watchdog_dev_tracker);
+ netdev_put(dev, &dev->watchdog_dev_tracker);
}
void __netdev_watchdog_up(struct net_device *dev)
@@ -551,7 +551,8 @@ void __netdev_watchdog_up(struct net_device *dev)
dev->watchdog_timeo = 5*HZ;
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies + dev->watchdog_timeo)))
- dev_hold_track(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &dev->watchdog_dev_tracker,
+ GFP_ATOMIC);
}
}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
@@ -565,7 +566,7 @@ static void dev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
- dev_put_track(dev, &dev->watchdog_dev_tracker);
+ netdev_put(dev, &dev->watchdog_dev_tracker);
netif_tx_unlock_bh(dev);
}
@@ -975,7 +976,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
- dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
refcount_set(&sch->refcnt, 1);
return sch;
@@ -1067,7 +1068,7 @@ static void qdisc_destroy(struct Qdisc *qdisc)
ops->destroy(qdisc);
module_put(ops->owner);
- dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker);
+ netdev_put(qdisc_dev(qdisc), &qdisc->dev_tracker);
trace_qdisc_destroy(qdisc);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index b9c71a304d39..0b941dd63d26 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -176,7 +177,7 @@ static ktime_t get_interval_end_time(struct sched_gate_list *sched,
static int length_to_duration(struct taprio_sched *q, int len)
{
- return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
+ return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
/* Returns the entry corresponding to next available interval. If
@@ -551,7 +552,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
atomic_set(&entry->budget,
- div64_u64((u64)entry->interval * 1000,
+ div64_u64((u64)entry->interval * PSEC_PER_NSEC,
atomic64_read(&q->picos_per_byte)));
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1a094b087d88..bcd3384ab07a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1523,11 +1523,11 @@ static __init int sctp_init(void)
limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
max_share = min(4UL*1024*1024, limit);
- sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
+ sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */
sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
- sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
+ sysctl_sctp_wmem[0] = PAGE_SIZE;
sysctl_sctp_wmem[1] = 16*1024;
sysctl_sctp_wmem[2] = max(64*1024, max_share);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 52edee1322fc..f6ee7f4040c1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6590,8 +6590,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
pr_debug("%s: under pressure, reneging for tsn:%u\n",
__func__, tsn);
deliver = SCTP_CMD_RENEGE;
- } else {
- sk_mem_reclaim(sk);
}
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6d37d2dfb3da..171f1a35d205 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -93,6 +93,7 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
+static DEFINE_PER_CPU(int, sctp_memory_per_cpu_fw_alloc);
struct percpu_counter sctp_sockets_allocated;
static void sctp_enter_memory_pressure(struct sock *sk)
@@ -1823,9 +1824,6 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sctp_wspace(asoc) < (int)msg_len)
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
- if (sk_under_memory_pressure(sk))
- sk_mem_reclaim(sk);
-
if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
@@ -9194,8 +9192,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
goto do_error;
if (signal_pending(current))
goto do_interrupted;
- if (sk_under_memory_pressure(sk))
- sk_mem_reclaim(sk);
if ((int)msg_len <= sctp_wspace(asoc) &&
sk_wmem_schedule(sk, msg_len))
break;
@@ -9657,7 +9653,10 @@ struct proto sctp_prot = {
.sysctl_wmem = sysctl_sctp_wmem,
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
+
.memory_allocated = &sctp_memory_allocated,
+ .per_cpu_fw_alloc = &sctp_memory_per_cpu_fw_alloc,
+
.sockets_allocated = &sctp_sockets_allocated,
};
@@ -9700,7 +9699,10 @@ struct proto sctpv6_prot = {
.sysctl_wmem = sysctl_sctp_wmem,
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
+
.memory_allocated = &sctp_memory_allocated,
+ .per_cpu_fw_alloc = &sctp_memory_per_cpu_fw_alloc,
+
.sockets_allocated = &sctp_sockets_allocated,
};
#endif /* IS_ENABLED(CONFIG_IPV6) */
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 6b13f737ebf2..bb22b71df7a3 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -979,8 +979,6 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
sctp_intl_start_pd(ulpq, gfp);
-
- sk_mem_reclaim(asoc->base.sk);
}
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 407fed46931b..0a8510a0c5e6 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1100,12 +1100,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
else if (retval == 1)
sctp_ulpq_reasm_drain(ulpq);
}
-
- sk_mem_reclaim(asoc->base.sk);
}
-
-
/* Notify the application if an association is aborted and in
* partial delivery mode. Send up any pending received messages.
*/
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 433bb5a7df31..6e70d9c10b78 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -487,6 +487,29 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
+/* register the new vzalloced sndbuf on all links */
+static int smcr_lgr_reg_sndbufs(struct smc_link *link,
+ struct smc_buf_desc *snd_desc)
+{
+ struct smc_link_group *lgr = link->lgr;
+ int i, rc = 0;
+
+ if (!snd_desc->is_vm)
+ return -EINVAL;
+
+ /* protect against parallel smcr_link_reg_buf() */
+ mutex_lock(&lgr->llc_conf_mutex);
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ if (!smc_link_active(&lgr->lnk[i]))
+ continue;
+ rc = smcr_link_reg_buf(&lgr->lnk[i], snd_desc);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&lgr->llc_conf_mutex);
+ return rc;
+}
+
/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
struct smc_buf_desc *rmb_desc)
@@ -498,13 +521,13 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
if (rc)
return rc;
/* protect against parallel smc_llc_cli_rkey_exchange() and
- * parallel smcr_link_reg_rmb()
+ * parallel smcr_link_reg_buf()
*/
mutex_lock(&lgr->llc_conf_mutex);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
if (!smc_link_active(&lgr->lnk[i]))
continue;
- rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
+ rc = smcr_link_reg_buf(&lgr->lnk[i], rmb_desc);
if (rc)
goto out;
}
@@ -550,8 +573,15 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
smc_wr_remember_qp_attr(link);
- if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
- return SMC_CLC_DECL_ERR_REGRMB;
+ /* reg the sndbuf if it was vzalloced */
+ if (smc->conn.sndbuf_desc->is_vm) {
+ if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
+ return SMC_CLC_DECL_ERR_REGBUF;
+ }
+
+ /* reg the rmb */
+ if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
+ return SMC_CLC_DECL_ERR_REGBUF;
/* confirm_rkey is implicit on 1st contact */
smc->conn.rmb_desc->is_conf_rkey = true;
@@ -1221,12 +1251,18 @@ static int smc_connect_rdma(struct smc_sock *smc,
goto connect_abort;
}
} else {
+ /* reg sendbufs if they were vzalloced */
+ if (smc->conn.sndbuf_desc->is_vm) {
+ if (smcr_lgr_reg_sndbufs(link, smc->conn.sndbuf_desc)) {
+ reason_code = SMC_CLC_DECL_ERR_REGBUF;
+ goto connect_abort;
+ }
+ }
if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
- reason_code = SMC_CLC_DECL_ERR_REGRMB;
+ reason_code = SMC_CLC_DECL_ERR_REGBUF;
goto connect_abort;
}
}
- smc_rmb_sync_sg_for_device(&smc->conn);
if (aclc->hdr.version > SMC_V1) {
struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
@@ -1750,8 +1786,15 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc)
struct smc_llc_qentry *qentry;
int rc;
- if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
- return SMC_CLC_DECL_ERR_REGRMB;
+ /* reg the sndbuf if it was vzalloced */
+ if (smc->conn.sndbuf_desc->is_vm) {
+ if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
+ return SMC_CLC_DECL_ERR_REGBUF;
+ }
+
+ /* reg the rmb */
+ if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
+ return SMC_CLC_DECL_ERR_REGBUF;
/* send CONFIRM LINK request to client over the RoCE fabric */
rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
@@ -2110,10 +2153,15 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
struct smc_connection *conn = &new_smc->conn;
if (!local_first) {
+ /* reg sendbufs if they were vzalloced */
+ if (conn->sndbuf_desc->is_vm) {
+ if (smcr_lgr_reg_sndbufs(conn->lnk,
+ conn->sndbuf_desc))
+ return SMC_CLC_DECL_ERR_REGBUF;
+ }
if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
- return SMC_CLC_DECL_ERR_REGRMB;
+ return SMC_CLC_DECL_ERR_REGBUF;
}
- smc_rmb_sync_sg_for_device(&new_smc->conn);
return 0;
}
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index f9f3f59c79de..1472f31480d8 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -1034,7 +1034,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
ETH_ALEN);
hton24(clc->r0.qpn, link->roce_qp->qp_num);
clc->r0.rmb_rkey =
- htonl(conn->rmb_desc->mr_rx[link->link_idx]->rkey);
+ htonl(conn->rmb_desc->mr[link->link_idx]->rkey);
clc->r0.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
clc->r0.rmbe_alert_token = htonl(conn->alert_token_local);
switch (clc->hdr.type) {
@@ -1046,8 +1046,10 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
break;
}
clc->r0.rmbe_size = conn->rmbe_size_short;
- clc->r0.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
- (conn->rmb_desc->sgt[link->link_idx].sgl));
+ clc->r0.rmb_dma_addr = conn->rmb_desc->is_vm ?
+ cpu_to_be64((uintptr_t)conn->rmb_desc->cpu_addr) :
+ cpu_to_be64((u64)sg_dma_address
+ (conn->rmb_desc->sgt[link->link_idx].sgl));
hton24(clc->r0.psn, link->psn_initial);
if (version == SMC_V1) {
clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 83f02f131fc0..5fee545c9a10 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -62,7 +62,7 @@
#define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */
#define SMC_CLC_DECL_ERR_RTOK 0x09990001 /* rtoken handling failed */
#define SMC_CLC_DECL_ERR_RDYLNK 0x09990002 /* ib ready link failed */
-#define SMC_CLC_DECL_ERR_REGRMB 0x09990003 /* reg rmb failed */
+#define SMC_CLC_DECL_ERR_REGBUF 0x09990003 /* reg rdma bufs failed */
#define SMC_FIRST_CONTACT_MASK 0b10 /* first contact bit within typev2 */
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f40f6ed0fbdb..ff49a11f57b8 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -347,6 +347,8 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr,
goto errattr;
if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_R_BUF_TYPE, lgr->buf_type))
+ goto errattr;
if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE,
@@ -907,6 +909,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lgr->net = smc_ib_net(lnk->smcibdev);
lgr_list = &smc_lgr_list.list;
lgr_lock = &smc_lgr_list.lock;
+ lgr->buf_type = lgr->net->smc.sysctl_smcr_buf_type;
atomic_inc(&lgr_cnt);
}
smc->conn.lgr = lgr;
@@ -1086,34 +1089,37 @@ err_out:
return NULL;
}
-static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
+static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
struct smc_link_group *lgr)
{
+ struct mutex *lock; /* lock buffer list */
int rc;
- if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
+ if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
/* unregister rmb with peer */
rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
if (!rc) {
/* protect against smc_llc_cli_rkey_exchange() */
mutex_lock(&lgr->llc_conf_mutex);
- smc_llc_do_delete_rkey(lgr, rmb_desc);
- rmb_desc->is_conf_rkey = false;
+ smc_llc_do_delete_rkey(lgr, buf_desc);
+ buf_desc->is_conf_rkey = false;
mutex_unlock(&lgr->llc_conf_mutex);
smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
}
- if (rmb_desc->is_reg_err) {
+ if (buf_desc->is_reg_err) {
/* buf registration failed, reuse not possible */
- mutex_lock(&lgr->rmbs_lock);
- list_del(&rmb_desc->list);
- mutex_unlock(&lgr->rmbs_lock);
+ lock = is_rmb ? &lgr->rmbs_lock :
+ &lgr->sndbufs_lock;
+ mutex_lock(lock);
+ list_del(&buf_desc->list);
+ mutex_unlock(lock);
- smc_buf_free(lgr, true, rmb_desc);
+ smc_buf_free(lgr, is_rmb, buf_desc);
} else {
- rmb_desc->used = 0;
- memset(rmb_desc->cpu_addr, 0, rmb_desc->len);
+ buf_desc->used = 0;
+ memset(buf_desc->cpu_addr, 0, buf_desc->len);
}
}
@@ -1121,15 +1127,23 @@ static void smc_buf_unuse(struct smc_connection *conn,
struct smc_link_group *lgr)
{
if (conn->sndbuf_desc) {
- conn->sndbuf_desc->used = 0;
- memset(conn->sndbuf_desc->cpu_addr, 0, conn->sndbuf_desc->len);
+ if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
+ smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
+ } else {
+ conn->sndbuf_desc->used = 0;
+ memset(conn->sndbuf_desc->cpu_addr, 0,
+ conn->sndbuf_desc->len);
+ }
}
- if (conn->rmb_desc && lgr->is_smcd) {
- conn->rmb_desc->used = 0;
- memset(conn->rmb_desc->cpu_addr, 0, conn->rmb_desc->len +
- sizeof(struct smcd_cdc_msg));
- } else if (conn->rmb_desc) {
- smcr_buf_unuse(conn->rmb_desc, lgr);
+ if (conn->rmb_desc) {
+ if (!lgr->is_smcd) {
+ smcr_buf_unuse(conn->rmb_desc, true, lgr);
+ } else {
+ conn->rmb_desc->used = 0;
+ memset(conn->rmb_desc->cpu_addr, 0,
+ conn->rmb_desc->len +
+ sizeof(struct smcd_cdc_msg));
+ }
}
}
@@ -1177,20 +1191,21 @@ lgr_put:
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
struct smc_link *lnk)
{
- if (is_rmb)
+ if (is_rmb || buf_desc->is_vm)
buf_desc->is_reg_mr[lnk->link_idx] = false;
if (!buf_desc->is_map_ib[lnk->link_idx])
return;
- if (is_rmb) {
- if (buf_desc->mr_rx[lnk->link_idx]) {
- smc_ib_put_memory_region(
- buf_desc->mr_rx[lnk->link_idx]);
- buf_desc->mr_rx[lnk->link_idx] = NULL;
- }
+
+ if ((is_rmb || buf_desc->is_vm) &&
+ buf_desc->mr[lnk->link_idx]) {
+ smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
+ buf_desc->mr[lnk->link_idx] = NULL;
+ }
+ if (is_rmb)
smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
- } else {
+ else
smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
- }
+
sg_free_table(&buf_desc->sgt[lnk->link_idx]);
buf_desc->is_map_ib[lnk->link_idx] = false;
}
@@ -1279,8 +1294,10 @@ static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);
- if (buf_desc->pages)
+ if (!buf_desc->is_vm && buf_desc->pages)
__free_pages(buf_desc->pages, buf_desc->order);
+ else if (buf_desc->is_vm && buf_desc->cpu_addr)
+ vfree(buf_desc->cpu_addr);
kfree(buf_desc);
}
@@ -1992,39 +2009,69 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size)
return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}
-/* map an rmb buf to a link */
+/* map a buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
struct smc_link *lnk)
{
- int rc;
+ int rc, i, nents, offset, buf_size, size, access_flags;
+ struct scatterlist *sg;
+ void *buf;
if (buf_desc->is_map_ib[lnk->link_idx])
return 0;
- rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
+ if (buf_desc->is_vm) {
+ buf = buf_desc->cpu_addr;
+ buf_size = buf_desc->len;
+ offset = offset_in_page(buf_desc->cpu_addr);
+ nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE;
+ } else {
+ nents = 1;
+ }
+
+ rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
if (rc)
return rc;
- sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
- buf_desc->cpu_addr, buf_desc->len);
+
+ if (buf_desc->is_vm) {
+ /* virtually contiguous buffer */
+ for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
+ size = min_t(int, PAGE_SIZE - offset, buf_size);
+ sg_set_page(sg, vmalloc_to_page(buf), size, offset);
+ buf += size / sizeof(*buf);
+ buf_size -= size;
+ offset = 0;
+ }
+ } else {
+ /* physically contiguous buffer */
+ sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
+ buf_desc->cpu_addr, buf_desc->len);
+ }
/* map sg table to DMA address */
rc = smc_ib_buf_map_sg(lnk, buf_desc,
is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
/* SMC protocol depends on mapping to one DMA address only */
- if (rc != 1) {
+ if (rc != nents) {
rc = -EAGAIN;
goto free_table;
}
- /* create a new memory region for the RMB */
- if (is_rmb) {
- rc = smc_ib_get_memory_region(lnk->roce_pd,
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_LOCAL_WRITE,
+ buf_desc->is_dma_need_sync |=
+ smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;
+
+ if (is_rmb || buf_desc->is_vm) {
+ /* create a new memory region for the RMB or vzalloced sndbuf */
+ access_flags = is_rmb ?
+ IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+ IB_ACCESS_LOCAL_WRITE;
+
+ rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags,
buf_desc, lnk->link_idx);
if (rc)
goto buf_unmap;
- smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
+ smc_ib_sync_sg_for_device(lnk, buf_desc,
+ is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
buf_desc->is_map_ib[lnk->link_idx] = true;
return 0;
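
The is_vm branch above works because a vzalloc'ed buffer is contiguous only in virtual address space: it is walked page by page, the first chunk may start at offset_in_page(), every later chunk starts at offset 0, and each chunk becomes one scatterlist entry via vmalloc_to_page(). A stand-alone sketch (illustration only, PAGE_SIZE assumed to be 4096) of that chunking:

/* Show the (offset, size) chunks a vzalloc'ed buffer is split into. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long buf_size = 16384;		/* a 16 KiB vzalloc'ed buffer */
	unsigned long offset = 0;		/* vzalloc returns page-aligned memory */
	unsigned long nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE;
	unsigned long remaining = buf_size;
	unsigned long i;

	printf("nents=%lu\n", nents);		/* 4 scatterlist entries */
	for (i = 0; i < nents; i++) {
		unsigned long size = PAGE_SIZE - offset;

		if (size > remaining)
			size = remaining;
		printf("entry %lu: offset=%lu size=%lu\n", i, offset, size);
		remaining -= size;
		offset = 0;	/* only the first page can start mid-page */
	}
	return 0;
}
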
@@ -2037,20 +2084,23 @@ free_table:
return rc;
}
-/* register a new rmb on IB device,
+/* register a new buf on IB device, rmb or vzalloced sndbuf
* must be called under lgr->llc_conf_mutex lock
*/
-int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
+int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
{
if (list_empty(&link->lgr->list))
return -ENOLINK;
- if (!rmb_desc->is_reg_mr[link->link_idx]) {
- /* register memory region for new rmb */
- if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
- rmb_desc->is_reg_err = true;
+ if (!buf_desc->is_reg_mr[link->link_idx]) {
+ /* register memory region for new buf */
+ if (buf_desc->is_vm)
+ buf_desc->mr[link->link_idx]->iova =
+ (uintptr_t)buf_desc->cpu_addr;
+ if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {
+ buf_desc->is_reg_err = true;
return -EFAULT;
}
- rmb_desc->is_reg_mr[link->link_idx] = true;
+ buf_desc->is_reg_mr[link->link_idx] = true;
}
return 0;
}
@@ -2102,18 +2152,38 @@ int smcr_buf_reg_lgr(struct smc_link *lnk)
struct smc_buf_desc *buf_desc, *bf;
int i, rc = 0;
+ /* reg all RMBs for a new link */
mutex_lock(&lgr->rmbs_lock);
for (i = 0; i < SMC_RMBE_SIZES; i++) {
list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
if (!buf_desc->used)
continue;
- rc = smcr_link_reg_rmb(lnk, buf_desc);
- if (rc)
- goto out;
+ rc = smcr_link_reg_buf(lnk, buf_desc);
+ if (rc) {
+ mutex_unlock(&lgr->rmbs_lock);
+ return rc;
+ }
}
}
-out:
mutex_unlock(&lgr->rmbs_lock);
+
+ if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
+ return rc;
+
+ /* reg all vzalloced sndbufs for a new link */
+ mutex_lock(&lgr->sndbufs_lock);
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
+ if (!buf_desc->used || !buf_desc->is_vm)
+ continue;
+ rc = smcr_link_reg_buf(lnk, buf_desc);
+ if (rc) {
+ mutex_unlock(&lgr->sndbufs_lock);
+ return rc;
+ }
+ }
+ }
+ mutex_unlock(&lgr->sndbufs_lock);
return rc;
}
@@ -2127,18 +2197,39 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
if (!buf_desc)
return ERR_PTR(-ENOMEM);
- buf_desc->order = get_order(bufsize);
- buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
- __GFP_NOMEMALLOC | __GFP_COMP |
- __GFP_NORETRY | __GFP_ZERO,
- buf_desc->order);
- if (!buf_desc->pages) {
- kfree(buf_desc);
- return ERR_PTR(-EAGAIN);
- }
- buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
- buf_desc->len = bufsize;
+ switch (lgr->buf_type) {
+ case SMCR_PHYS_CONT_BUFS:
+ case SMCR_MIXED_BUFS:
+ buf_desc->order = get_order(bufsize);
+ buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NOMEMALLOC | __GFP_COMP |
+ __GFP_NORETRY | __GFP_ZERO,
+ buf_desc->order);
+ if (buf_desc->pages) {
+ buf_desc->cpu_addr =
+ (void *)page_address(buf_desc->pages);
+ buf_desc->len = bufsize;
+ buf_desc->is_vm = false;
+ break;
+ }
+ if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
+ goto out;
+ fallthrough; // try virtually contiguous buf
+ case SMCR_VIRT_CONT_BUFS:
+ buf_desc->order = get_order(bufsize);
+ buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
+ if (!buf_desc->cpu_addr)
+ goto out;
+ buf_desc->pages = NULL;
+ buf_desc->len = bufsize;
+ buf_desc->is_vm = true;
+ break;
+ }
return buf_desc;
+
+out:
+ kfree(buf_desc);
+ return ERR_PTR(-EAGAIN);
}
/* map buf_desc on all usable links,
@@ -2234,6 +2325,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
/* check for reusable slot in the link group */
buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
if (buf_desc) {
+ buf_desc->is_dma_need_sync = 0;
SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
break; /* found reusable slot */
@@ -2268,7 +2360,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
if (!is_smcd) {
if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
- smcr_buf_unuse(buf_desc, lgr);
+ smcr_buf_unuse(buf_desc, is_rmb, lgr);
return -ENOMEM;
}
}
@@ -2290,16 +2382,10 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
return 0;
}
-void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
-{
- if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd ||
- !smc_link_active(conn->lnk))
- return;
- smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
-}
-
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
+ if (!conn->sndbuf_desc->is_dma_need_sync)
+ return;
if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd ||
!smc_link_active(conn->lnk))
return;
@@ -2310,6 +2396,8 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
int i;
+ if (!conn->rmb_desc->is_dma_need_sync)
+ return;
if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd)
return;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
@@ -2320,20 +2408,6 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
}
}
-void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
-{
- int i;
-
- if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd)
- return;
- for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_active(&conn->lgr->lnk[i]))
- continue;
- smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
- DMA_FROM_DEVICE);
- }
-}
-
/* create the send and receive buffer for an SMC socket;
* receive buffers are called RMBs;
* (even though the SMC protocol allows more than one RMB-element per RMB,
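
For orientation, the allocation policy that smcr_new_buf_create() follows after this change can be condensed as below. This is a restatement of the hunk above for readability, not additional kernel code; error handling and the remaining buf_desc bookkeeping are elided.

        /* SMCR_PHYS_CONT_BUFS: alloc_pages() only, otherwise fail with -EAGAIN
         * SMCR_MIXED_BUFS:     alloc_pages() first, fall back to vzalloc()
         * SMCR_VIRT_CONT_BUFS: vzalloc() only
         */
        switch (lgr->buf_type) {
        case SMCR_PHYS_CONT_BUFS:
        case SMCR_MIXED_BUFS:
                buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(bufsize));
                if (buf_desc->pages)
                        break;                          /* is_vm stays false */
                if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
                        goto out;                       /* no fallback allowed */
                fallthrough;
        case SMCR_VIRT_CONT_BUFS:
                buf_desc->cpu_addr = vzalloc(PAGE_SIZE << get_order(bufsize));
                if (!buf_desc->cpu_addr)
                        goto out;
                buf_desc->is_vm = true;                 /* needs per-link MR */
                break;
        }
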
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 4cb03e942364..fe8b524ad846 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -168,9 +168,11 @@ struct smc_buf_desc {
struct { /* SMC-R */
struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];
/* virtual buffer */
- struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX];
- /* for rmb only: memory region
+ struct ib_mr *mr[SMC_LINKS_PER_LGR_MAX];
+ /* memory region: for rmb and
+ * vzalloced sndbuf
* incl. rkey provided to peer
+ * and lkey provided to local
*/
u32 order; /* allocation order */
@@ -180,8 +182,11 @@ struct smc_buf_desc {
/* mem region registered */
u8 is_map_ib[SMC_LINKS_PER_LGR_MAX];
/* mem region mapped to lnk */
+ u8 is_dma_need_sync;
u8 is_reg_err;
/* buffer registration err */
+ u8 is_vm;
+ /* virtually contiguous */
};
struct { /* SMC-D */
unsigned short sba_idx;
@@ -216,6 +221,12 @@ enum smc_lgr_type { /* redundancy state of lgr */
SMC_LGR_ASYMMETRIC_LOCAL, /* local has 1, peer 2 active RNICs */
};
+enum smcr_buf_type { /* types of SMC-R sndbufs and RMBs */
+ SMCR_PHYS_CONT_BUFS = 0,
+ SMCR_VIRT_CONT_BUFS = 1,
+ SMCR_MIXED_BUFS = 2,
+};
+
enum smc_llc_flowtype {
SMC_LLC_FLOW_NONE = 0,
SMC_LLC_FLOW_ADD_LINK = 2,
@@ -277,6 +288,7 @@ struct smc_link_group {
/* used rtoken elements */
u8 next_link_id;
enum smc_lgr_type type;
+ enum smcr_buf_type buf_type;
/* redundancy state */
u8 pnet_id[SMC_MAX_PNETID_LEN + 1];
/* pnet id of this lgr */
@@ -513,10 +525,8 @@ void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
__be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey);
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
__be64 nw_vaddr, __be32 nw_rkey);
-void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
-void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);
void smc_conn_free(struct smc_connection *conn);
@@ -537,7 +547,7 @@ int smcr_buf_reg_lgr(struct smc_link *lnk);
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
enum smc_lgr_type new_type, int asym_lnk_idx);
-int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc);
+int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *rmb_desc);
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
struct smc_link *from_lnk, bool is_dev_err);
void smcr_link_down_cond(struct smc_link *lnk);
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index dcda4165d107..854772dd52fd 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -698,7 +698,7 @@ static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
int sg_num;
/* map the largest prefix of a dma mapped SG list */
- sg_num = ib_map_mr_sg(buf_slot->mr_rx[link_idx],
+ sg_num = ib_map_mr_sg(buf_slot->mr[link_idx],
buf_slot->sgt[link_idx].sgl,
buf_slot->sgt[link_idx].orig_nents,
&offset, PAGE_SIZE);
@@ -710,25 +710,49 @@ static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
struct smc_buf_desc *buf_slot, u8 link_idx)
{
- if (buf_slot->mr_rx[link_idx])
+ if (buf_slot->mr[link_idx])
return 0; /* already done */
- buf_slot->mr_rx[link_idx] =
+ buf_slot->mr[link_idx] =
ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
- if (IS_ERR(buf_slot->mr_rx[link_idx])) {
+ if (IS_ERR(buf_slot->mr[link_idx])) {
int rc;
- rc = PTR_ERR(buf_slot->mr_rx[link_idx]);
- buf_slot->mr_rx[link_idx] = NULL;
+ rc = PTR_ERR(buf_slot->mr[link_idx]);
+ buf_slot->mr[link_idx] = NULL;
return rc;
}
- if (smc_ib_map_mr_sg(buf_slot, link_idx) != 1)
+ if (smc_ib_map_mr_sg(buf_slot, link_idx) !=
+ buf_slot->sgt[link_idx].orig_nents)
return -EINVAL;
return 0;
}
+bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
+ struct smc_buf_desc *buf_slot)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+ bool ret = false;
+
+ /* for now there is just one DMA address */
+ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
+ buf_slot->sgt[lnk->link_idx].nents, i) {
+ if (!sg_dma_len(sg))
+ break;
+ if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
+ sg_dma_address(sg))) {
+ ret = true;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
struct smc_buf_desc *buf_slot,
@@ -737,6 +761,9 @@ void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
struct scatterlist *sg;
unsigned int i;
+ if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
+ return;
+
/* for now there is just one DMA address */
for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
buf_slot->sgt[lnk->link_idx].nents, i) {
@@ -757,6 +784,9 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
struct scatterlist *sg;
unsigned int i;
+ if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
+ return;
+
/* for now there is just one DMA address */
for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
buf_slot->sgt[lnk->link_idx].nents, i) {
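
The early returns added to smc_ib_sync_sg_for_cpu() and smc_ib_sync_sg_for_device() above rely on buf_desc->is_dma_need_sync carrying one bit per link. The code that sets the bit is not part of this excerpt; a minimal sketch of where it is assumed to be recorded, right after the buffer's SG table has been DMA-mapped for a link (placement is hypothetical, the helper and field are the ones declared above):

        /* remember whether this device/IOMMU pairing really needs
         * dma_sync_*() for the buffer, so the data path can skip the
         * per-chunk sync calls when it does not
         */
        if (smc_ib_is_sg_need_sync(lnk, buf_desc))
                buf_desc->is_dma_need_sync |= 1U << lnk->link_idx;
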
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 5d8b49c57f50..034295676e88 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -102,6 +102,8 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
struct smc_buf_desc *buf_slot, u8 link_idx);
void smc_ib_put_memory_region(struct ib_mr *mr);
+bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
+ struct smc_buf_desc *buf_slot);
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction);
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 0bde36b56472..175026ae33ae 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -505,19 +505,22 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
if (smc_link_active(link) && link != send_link) {
rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
rkeyllc->rtoken[rtok_ix].rmb_key =
- htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
- rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
- (u64)sg_dma_address(
- rmb_desc->sgt[link->link_idx].sgl));
+ htonl(rmb_desc->mr[link->link_idx]->rkey);
+ rkeyllc->rtoken[rtok_ix].rmb_vaddr = rmb_desc->is_vm ?
+ cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
+ cpu_to_be64((u64)sg_dma_address
+ (rmb_desc->sgt[link->link_idx].sgl));
rtok_ix++;
}
}
/* rkey of send_link is in rtoken[0] */
rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
rkeyllc->rtoken[0].rmb_key =
- htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
- rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
- (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
+ htonl(rmb_desc->mr[send_link->link_idx]->rkey);
+ rkeyllc->rtoken[0].rmb_vaddr = rmb_desc->is_vm ?
+ cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
+ cpu_to_be64((u64)sg_dma_address
+ (rmb_desc->sgt[send_link->link_idx].sgl));
/* send llc message */
rc = smc_wr_tx_send(send_link, pend);
put_out:
@@ -544,7 +547,7 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
rkeyllc->num_rkeys = 1;
- rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
+ rkeyllc->rkey[0] = htonl(rmb_desc->mr[link->link_idx]->rkey);
/* send llc message */
rc = smc_wr_tx_send(link, pend);
put_out:
@@ -614,9 +617,10 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
if (!buf_pos)
break;
rmb = buf_pos;
- ext->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
- ext->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
- ext->rt[i].rmb_vaddr_new =
+ ext->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
+ ext->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
+ ext->rt[i].rmb_vaddr_new = rmb->is_vm ?
+ cpu_to_be64((uintptr_t)rmb->cpu_addr) :
cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
while (buf_pos && !(buf_pos)->used)
@@ -852,9 +856,10 @@ static int smc_llc_add_link_cont(struct smc_link *link,
}
rmb = *buf_pos;
- addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
- addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
- addc_llc->rt[i].rmb_vaddr_new =
+ addc_llc->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
+ addc_llc->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
+ addc_llc->rt[i].rmb_vaddr_new = rmb->is_vm ?
+ cpu_to_be64((uintptr_t)rmb->cpu_addr) :
cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
(*num_rkeys_todo)--;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 7055ed10e316..4c3bf6db7038 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -120,7 +120,8 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
smc_pnet_match(pnetelem->pnet_name, pnet_name)) {
list_del(&pnetelem->list);
if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev) {
- dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
+ netdev_put(pnetelem->ndev,
+ &pnetelem->dev_tracker);
pr_warn_ratelimited("smc: net device %s "
"erased user defined "
"pnetid %.16s\n",
@@ -196,7 +197,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
!strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
- dev_hold_track(ndev, &pnetelem->dev_tracker, GFP_ATOMIC);
+ netdev_hold(ndev, &pnetelem->dev_tracker, GFP_ATOMIC);
pnetelem->ndev = ndev;
rc = 0;
pr_warn_ratelimited("smc: adding net device %s with "
@@ -227,7 +228,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
mutex_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
- dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
+ netdev_put(pnetelem->ndev, &pnetelem->dev_tracker);
pnetelem->ndev = NULL;
rc = 0;
pr_warn_ratelimited("smc: removing net device %s with "
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 338b9ef806e8..17c5aee7ee4f 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -145,35 +145,93 @@ static void smc_rx_spd_release(struct splice_pipe_desc *spd,
static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
struct smc_sock *smc)
{
+ struct smc_link_group *lgr = smc->conn.lgr;
+ int offset = offset_in_page(src);
+ struct partial_page *partial;
struct splice_pipe_desc spd;
- struct partial_page partial;
- struct smc_spd_priv *priv;
- int bytes;
+ struct smc_spd_priv **priv;
+ struct page **pages;
+ int bytes, nr_pages;
+ int i;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ nr_pages = !lgr->is_smcd && smc->conn.rmb_desc->is_vm ?
+ PAGE_ALIGN(len + offset) / PAGE_SIZE : 1;
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ goto out;
+ partial = kcalloc(nr_pages, sizeof(*partial), GFP_KERNEL);
+ if (!partial)
+ goto out_page;
+ priv = kcalloc(nr_pages, sizeof(*priv), GFP_KERNEL);
if (!priv)
- return -ENOMEM;
- priv->len = len;
- priv->smc = smc;
- partial.offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
- partial.len = len;
- partial.private = (unsigned long)priv;
-
- spd.nr_pages_max = 1;
- spd.nr_pages = 1;
- spd.pages = &smc->conn.rmb_desc->pages;
- spd.partial = &partial;
+ goto out_part;
+ for (i = 0; i < nr_pages; i++) {
+ priv[i] = kzalloc(sizeof(**priv), GFP_KERNEL);
+ if (!priv[i])
+ goto out_priv;
+ }
+
+ if (lgr->is_smcd ||
+ (!lgr->is_smcd && !smc->conn.rmb_desc->is_vm)) {
+ /* smcd or smcr that uses physically contiguous RMBs */
+ priv[0]->len = len;
+ priv[0]->smc = smc;
+ partial[0].offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
+ partial[0].len = len;
+ partial[0].private = (unsigned long)priv[0];
+ pages[0] = smc->conn.rmb_desc->pages;
+ } else {
+ int size, left = len;
+ void *buf = src;
+ /* smcr that uses virtually contiguous RMBs */
+ for (i = 0; i < nr_pages; i++) {
+ size = min_t(int, PAGE_SIZE - offset, left);
+ priv[i]->len = size;
+ priv[i]->smc = smc;
+ pages[i] = vmalloc_to_page(buf);
+ partial[i].offset = offset;
+ partial[i].len = size;
+ partial[i].private = (unsigned long)priv[i];
+ buf += size / sizeof(*buf);
+ left -= size;
+ offset = 0;
+ }
+ }
+ spd.nr_pages_max = nr_pages;
+ spd.nr_pages = nr_pages;
+ spd.pages = pages;
+ spd.partial = partial;
spd.ops = &smc_pipe_ops;
spd.spd_release = smc_rx_spd_release;
bytes = splice_to_pipe(pipe, &spd);
if (bytes > 0) {
sock_hold(&smc->sk);
- get_page(smc->conn.rmb_desc->pages);
+ if (!lgr->is_smcd && smc->conn.rmb_desc->is_vm) {
+ for (i = 0; i < PAGE_ALIGN(bytes + offset) / PAGE_SIZE; i++)
+ get_page(pages[i]);
+ } else {
+ get_page(smc->conn.rmb_desc->pages);
+ }
atomic_add(bytes, &smc->conn.splice_pending);
}
+ kfree(priv);
+ kfree(partial);
+ kfree(pages);
return bytes;
+
+out_priv:
+ for (i = (i - 1); i >= 0; i--)
+ kfree(priv[i]);
+ kfree(priv);
+out_part:
+ kfree(partial);
+out_page:
+ kfree(pages);
+out:
+ return -ENOMEM;
}
static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
@@ -413,7 +471,6 @@ copy:
if (rc < 0) {
if (!read_done)
read_done = -EFAULT;
- smc_rmb_sync_sg_for_device(conn);
goto out;
}
}
@@ -427,7 +484,6 @@ copy:
chunk_len_sum += chunk_len;
chunk_off = 0; /* modulo offset in recv ring buffer */
}
- smc_rmb_sync_sg_for_device(conn);
/* update cursors */
if (!(flags & MSG_PEEK)) {
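
The smc_rx_splice() rework above exists because splice_to_pipe() consumes struct page references: a physically contiguous RMB contributes a single (compound) page, while a vzalloc'ed RMB has to be walked page by page with vmalloc_to_page(). A stripped-down sketch of that walk, using the same chunking as the hunk above (variable names are illustrative):

        /* split [src, src + len) of a vzalloc'ed RMB into per-page chunks */
        int offset = offset_in_page(src);
        char *buf = src;
        int left = len, size, i;

        for (i = 0; left > 0; i++) {
                size = min_t(int, PAGE_SIZE - offset, left);
                pages[i] = vmalloc_to_page(buf);  /* page backing this chunk */
                partial[i].offset = offset;
                partial[i].len = size;
                buf += size;
                left -= size;
                offset = 0;             /* only the first chunk may be unaligned */
        }
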
diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c
index cf3ab1334c00..0613868fdb97 100644
--- a/net/smc/smc_sysctl.c
+++ b/net/smc/smc_sysctl.c
@@ -15,6 +15,7 @@
#include <net/net_namespace.h>
#include "smc.h"
+#include "smc_core.h"
#include "smc_sysctl.h"
static struct ctl_table smc_table[] = {
@@ -25,6 +26,15 @@ static struct ctl_table smc_table[] = {
.mode = 0644,
.proc_handler = proc_douintvec,
},
+ {
+ .procname = "smcr_buf_type",
+ .data = &init_net.smc.sysctl_smcr_buf_type,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
{ }
};
@@ -49,6 +59,7 @@ int __net_init smc_sysctl_net_init(struct net *net)
goto err_reg;
net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE;
+ net->smc.sysctl_smcr_buf_type = SMCR_PHYS_CONT_BUFS;
return 0;
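
The new knob is per network namespace and maps directly onto enum smcr_buf_type (0 physically contiguous only, 1 virtually contiguous only, 2 mixed). Assuming the usual proc path for the net.smc table (the path is inferred, not shown in this excerpt), switching a namespace to mixed buffers from user space could look like this; it needs the appropriate privileges:

        #include <stdio.h>

        int main(void)
        {
                /* 2 == SMCR_MIXED_BUFS: try physically contiguous, fall back to vzalloc */
                FILE *f = fopen("/proc/sys/net/smc/smcr_buf_type", "w");

                if (!f)
                        return 1;
                fprintf(f, "2\n");
                return fclose(f) ? 1 : 0;
        }
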
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 805a546e8c04..4e8377657a62 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -246,7 +246,6 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
tx_cnt_prep);
chunk_len_sum = chunk_len;
chunk_off = tx_cnt_prep;
- smc_sndbuf_sync_sg_for_cpu(conn);
for (chunk = 0; chunk < 2; chunk++) {
rc = memcpy_from_msg(sndbuf_base + chunk_off,
msg, chunk_len);
@@ -384,6 +383,7 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
dma_addr_t dma_addr =
sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl);
+ u64 virt_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr;
int src_len_sum = src_len, dst_len_sum = dst_len;
int sent_count = src_off;
int srcchunk, dstchunk;
@@ -396,7 +396,7 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
u64 base_addr = dma_addr;
if (dst_len < link->qp_attr.cap.max_inline_data) {
- base_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr;
+ base_addr = virt_addr;
wr->wr.send_flags |= IB_SEND_INLINE;
} else {
wr->wr.send_flags &= ~IB_SEND_INLINE;
@@ -404,8 +404,12 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
num_sges = 0;
for (srcchunk = 0; srcchunk < 2; srcchunk++) {
- sge[srcchunk].addr = base_addr + src_off;
+ sge[srcchunk].addr = conn->sndbuf_desc->is_vm ?
+ (virt_addr + src_off) : (base_addr + src_off);
sge[srcchunk].length = src_len;
+ if (conn->sndbuf_desc->is_vm)
+ sge[srcchunk].lkey =
+ conn->sndbuf_desc->mr[link->link_idx]->lkey;
num_sges++;
src_off += src_len;
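
The sndbuf side of the change is visible in the SGE setup above: a physically contiguous sndbuf keeps being addressed by its DMA address (with a local key assumed to be pre-set when the work request is prepared), whereas a vzalloc'ed sndbuf is addressed by its kernel virtual address and must carry the lkey of the memory region registered for this link. Reduced to its core, and glossing over the inline-data special case, the per-chunk setup is:

        if (conn->sndbuf_desc->is_vm) {
                /* vzalloc'ed sndbuf: virtual address, lkey of the per-link MR */
                sge[srcchunk].addr = virt_addr + src_off;
                sge[srcchunk].lkey =
                        conn->sndbuf_desc->mr[link->link_idx]->lkey;
        } else {
                /* physically contiguous sndbuf: DMA address is sufficient */
                sge[srcchunk].addr = base_addr + src_off;
        }
        sge[srcchunk].length = src_len;
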
diff --git a/net/socket.c b/net/socket.c
index 96300cdc0625..b6bd4cf44d3f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1878,10 +1878,8 @@ out_fd:
return ERR_PTR(err);
}
-int __sys_accept4_file(struct file *file, unsigned file_flags,
- struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags,
- unsigned long nofile)
+static int __sys_accept4_file(struct file *file, struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags)
{
struct file *newfile;
int newfd;
@@ -1892,11 +1890,11 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
- newfd = __get_unused_fd_flags(flags, nofile);
+ newfd = get_unused_fd_flags(flags);
if (unlikely(newfd < 0))
return newfd;
- newfile = do_accept(file, file_flags, upeer_sockaddr, upeer_addrlen,
+ newfile = do_accept(file, 0, upeer_sockaddr, upeer_addrlen,
flags);
if (IS_ERR(newfile)) {
put_unused_fd(newfd);
@@ -1926,9 +1924,8 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
f = fdget(fd);
if (f.file) {
- ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
- upeer_addrlen, flags,
- rlimit(RLIMIT_NOFILE));
+ ret = __sys_accept4_file(f.file, upeer_sockaddr,
+ upeer_addrlen, flags);
fdput(f);
}
@@ -2106,6 +2103,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
+ msg.msg_ubuf = NULL;
if (addr) {
err = move_addr_to_kernel(addr, addr_len, &address);
if (err < 0)
@@ -2405,6 +2403,7 @@ int __copy_msghdr_from_user(struct msghdr *kmsg,
return -EMSGSIZE;
kmsg->msg_iocb = NULL;
+ kmsg->msg_ubuf = NULL;
*uiov = msg.msg_iov;
*nsegs = msg.msg_iovlen;
return 0;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 1a72c67afed5..8299ceb3e373 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -533,6 +533,9 @@ EXPORT_SYMBOL_GPL(strp_check_rcv);
static int __init strp_dev_init(void)
{
+ BUILD_BUG_ON(sizeof(struct sk_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
+
strp_wq = create_singlethread_workqueue("kstrp");
if (unlikely(!strp_wq))
return -ENOMEM;
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 474f76383033..8cc42aea19c7 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -64,7 +64,7 @@ void switchdev_deferred_process(void)
while ((dfitem = switchdev_deferred_dequeue())) {
dfitem->func(dfitem->dev, dfitem->data);
- dev_put_track(dfitem->dev, &dfitem->dev_tracker);
+ netdev_put(dfitem->dev, &dfitem->dev_tracker);
kfree(dfitem);
}
}
@@ -91,7 +91,7 @@ static int switchdev_deferred_enqueue(struct net_device *dev,
dfitem->dev = dev;
dfitem->func = func;
memcpy(dfitem->data, data, data_len);
- dev_hold_track(dev, &dfitem->dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
spin_lock_bh(&deferred_lock);
list_add_tail(&dfitem->list, &deferred);
spin_unlock_bh(&deferred_lock);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 932c87b98eca..35cac7733fd3 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -788,7 +788,7 @@ int tipc_attach_loopback(struct net *net)
if (!dev)
return -ENODEV;
- dev_hold_track(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL);
tn->loopback_pt.dev = dev;
tn->loopback_pt.type = htons(ETH_P_TIPC);
tn->loopback_pt.func = tipc_loopback_rcv_pkt;
@@ -801,7 +801,7 @@ void tipc_detach_loopback(struct net *net)
struct tipc_net *tn = tipc_net(net);
dev_remove_pack(&tn->loopback_pt);
- dev_put_track(net->loopback_dev, &tn->loopback_pt.dev_tracker);
+ netdev_put(net->loopback_dev, &tn->loopback_pt.dev_tracker);
}
/* Caller should hold rtnl_lock to protect the bearer */
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 1d8ba233d047..d1180370fdf4 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1202,14 +1202,3 @@ void tipc_dest_list_purge(struct list_head *l)
kfree(dst);
}
}
-
-int tipc_dest_list_len(struct list_head *l)
-{
- struct tipc_dest *dst;
- int i = 0;
-
- list_for_each_entry(dst, l, list) {
- i++;
- }
- return i;
-}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 259f95e3d99c..3bcd9ef8cee3 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -151,6 +151,5 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port);
bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port);
bool tipc_dest_del(struct list_head *l, u32 node, u32 port);
void tipc_dest_list_purge(struct list_head *l);
-int tipc_dest_list_len(struct list_head *l);
#endif
diff --git a/net/tls/Makefile b/net/tls/Makefile
index f1ffbfe8968d..e41c800489ac 100644
--- a/net/tls/Makefile
+++ b/net/tls/Makefile
@@ -7,7 +7,7 @@ CFLAGS_trace.o := -I$(src)
obj-$(CONFIG_TLS) += tls.o
-tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o tls_strp.o
tls-$(CONFIG_TLS_TOE) += tls_toe.o
tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
diff --git a/net/tls/tls.h b/net/tls/tls.h
new file mode 100644
index 000000000000..3740740504e3
--- /dev/null
+++ b/net/tls/tls.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _TLS_INT_H
+#define _TLS_INT_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/skmsg.h>
+#include <net/tls.h>
+
+#define TLS_PAGE_ORDER (min_t(unsigned int, PAGE_ALLOC_COSTLY_ORDER, \
+ TLS_MAX_PAYLOAD_SIZE >> PAGE_SHIFT))
+
+#define __TLS_INC_STATS(net, field) \
+ __SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define TLS_INC_STATS(net, field) \
+ SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define TLS_DEC_STATS(net, field) \
+ SNMP_DEC_STATS((net)->mib.tls_statistics, field)
+
+/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
+ * allocated or mapped for each TLS record. After encryption, the records are
+ * stored in a linked list.
+ */
+struct tls_rec {
+ struct list_head list;
+ int tx_ready;
+ int tx_flags;
+
+ struct sk_msg msg_plaintext;
+ struct sk_msg msg_encrypted;
+
+ /* AAD | msg_plaintext.sg.data | sg_tag */
+ struct scatterlist sg_aead_in[2];
+ /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
+ struct scatterlist sg_aead_out[2];
+
+ char content_type;
+ struct scatterlist sg_content_type;
+
+ char aad_space[TLS_AAD_SPACE_SIZE];
+ u8 iv_data[MAX_IV_SIZE];
+ struct aead_request aead_req;
+ u8 aead_req_ctx[];
+};
+
+int __net_init tls_proc_init(struct net *net);
+void __net_exit tls_proc_fini(struct net *net);
+
+struct tls_context *tls_ctx_create(struct sock *sk);
+void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
+void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+
+int wait_on_pending_writer(struct sock *sk, long *timeo);
+int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
+int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
+ unsigned int optlen);
+void tls_err_abort(struct sock *sk, int err);
+
+int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
+void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
+void tls_sw_strparser_done(struct tls_context *tls_ctx);
+int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+int tls_sw_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
+void tls_sw_release_resources_tx(struct sock *sk);
+void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
+void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
+void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
+int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len);
+bool tls_sw_sock_is_readable(struct sock *sk);
+ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+
+int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_device_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+int tls_tx_records(struct sock *sk, int flags);
+
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
+
+int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type);
+int decrypt_skb(struct sock *sk, struct scatterlist *sgout);
+
+int tls_sw_fallback_init(struct sock *sk,
+ struct tls_offload_context_tx *offload_ctx,
+ struct tls_crypto_info *crypto_info);
+
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *dst);
+
+static inline struct tls_msg *tls_msg(struct sk_buff *skb)
+{
+ struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
+
+ return &scb->tls;
+}
+
+static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
+{
+ return ctx->recv_pkt;
+}
+
+#ifdef CONFIG_TLS_DEVICE
+int tls_device_init(void);
+void tls_device_cleanup(void);
+int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
+void tls_device_free_resources_tx(struct sock *sk);
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx);
+#else
+static inline int tls_device_init(void) { return 0; }
+static inline void tls_device_cleanup(void) {}
+
+static inline int
+tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_free_resources_tx(struct sock *sk) {}
+
+static inline int
+tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
+static inline void
+tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
+
+static inline int
+tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
+{
+ return 0;
+}
+#endif
+
+int tls_push_sg(struct sock *sk, struct tls_context *ctx,
+ struct scatterlist *sg, u16 first_offset,
+ int flags);
+int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+ int flags);
+void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
+
+static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
+{
+ return !!ctx->partially_sent_record;
+}
+
+static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
+{
+ return tls_ctx->pending_open_record_frags;
+}
+
+static inline bool tls_bigint_increment(unsigned char *seq, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ ++seq[i];
+ if (seq[i] != 0)
+ break;
+ }
+
+ return (i == -1);
+}
+
+static inline void tls_bigint_subtract(unsigned char *seq, int n)
+{
+ u64 rcd_sn;
+ __be64 *p;
+
+ BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
+
+ p = (__be64 *)seq;
+ rcd_sn = be64_to_cpu(*p);
+ *p = cpu_to_be64(rcd_sn - n);
+}
+
+static inline void
+tls_advance_record_sn(struct sock *sk, struct tls_prot_info *prot,
+ struct cipher_context *ctx)
+{
+ if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
+ tls_err_abort(sk, -EBADMSG);
+
+ if (prot->version != TLS_1_3_VERSION &&
+ prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
+ tls_bigint_increment(ctx->iv + prot->salt_size,
+ prot->iv_size);
+}
+
+static inline void
+tls_xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
+{
+ int i;
+
+ if (prot->version == TLS_1_3_VERSION ||
+ prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
+ for (i = 0; i < 8; i++)
+ iv[i + 4] ^= seq[i];
+ }
+}
+
+static inline void
+tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len,
+ unsigned char record_type)
+{
+ struct tls_prot_info *prot = &ctx->prot_info;
+ size_t pkt_len, iv_size = prot->iv_size;
+
+ pkt_len = plaintext_len + prot->tag_size;
+ if (prot->version != TLS_1_3_VERSION &&
+ prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
+ pkt_len += iv_size;
+
+ memcpy(buf + TLS_NONCE_OFFSET,
+ ctx->tx.iv + prot->salt_size, iv_size);
+ }
+
+ /* we cover the explicit nonce here as well, so buf should be of
+ * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
+ */
+ buf[0] = prot->version == TLS_1_3_VERSION ?
+ TLS_RECORD_TYPE_DATA : record_type;
+ /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
+ buf[1] = TLS_1_2_VERSION_MINOR;
+ buf[2] = TLS_1_2_VERSION_MAJOR;
+ /* we can use the IV for the explicit nonce according to the spec */
+ buf[3] = pkt_len >> 8;
+ buf[4] = pkt_len & 0xFF;
+}
+
+static inline
+void tls_make_aad(char *buf, size_t size, char *record_sequence,
+ unsigned char record_type, struct tls_prot_info *prot)
+{
+ if (prot->version != TLS_1_3_VERSION) {
+ memcpy(buf, record_sequence, prot->rec_seq_size);
+ buf += 8;
+ } else {
+ size += prot->tag_size;
+ }
+
+ buf[0] = prot->version == TLS_1_3_VERSION ?
+ TLS_RECORD_TYPE_DATA : record_type;
+ buf[1] = TLS_1_2_VERSION_MAJOR;
+ buf[2] = TLS_1_2_VERSION_MINOR;
+ buf[3] = size >> 8;
+ buf[4] = size & 0xFF;
+}
+
+#endif
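
tls_bigint_increment() treats the record sequence number as a big-endian, fixed-width counter and reports a full wrap-around through its return value, which tls_advance_record_sn() then turns into a connection abort. A stand-alone, user-space copy of the helper to illustrate that behaviour (for demonstration only):

        #include <stdbool.h>
        #include <stdio.h>

        /* user-space copy of tls_bigint_increment() for illustration */
        static bool bigint_increment(unsigned char *seq, int len)
        {
                int i;

                for (i = len - 1; i >= 0; i--) {
                        ++seq[i];
                        if (seq[i] != 0)
                                break;
                }
                return (i == -1);       /* true only when every byte wrapped to zero */
        }

        int main(void)
        {
                unsigned char seq[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };
                unsigned char max[8] = { 0xff, 0xff, 0xff, 0xff,
                                         0xff, 0xff, 0xff, 0xff };

                printf("%d\n", bigint_increment(seq, 8)); /* 0: seq becomes ...00 01 00 */
                printf("%d\n", bigint_increment(max, 8)); /* 1: counter wrapped around */
                return 0;
        }
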
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 879b9024678e..b1fcd61836d1 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -38,6 +38,7 @@
#include <net/tcp.h>
#include <net/tls.h>
+#include "tls.h"
#include "trace.h"
/* device_offload_lock is used to synchronize tls_dev_add
@@ -564,7 +565,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
lock_sock(sk);
if (unlikely(msg->msg_controllen)) {
- rc = tls_proccess_cmsg(sk, msg, &record_type);
+ rc = tls_process_cmsg(sk, msg, &record_type);
if (rc)
goto out;
}
@@ -890,14 +891,19 @@ static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
}
}
-static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
+static int
+tls_device_reencrypt(struct sock *sk, struct tls_sw_context_rx *sw_ctx)
{
- struct strp_msg *rxm = strp_msg(skb);
- int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
- struct sk_buff *skb_iter, *unused;
+ int err = 0, offset, copy, nsg, data_len, pos;
+ struct sk_buff *skb, *skb_iter, *unused;
struct scatterlist sg[1];
+ struct strp_msg *rxm;
char *orig_buf, *buf;
+ skb = tls_strp_msg(sw_ctx);
+ rxm = strp_msg(skb);
+ offset = rxm->offset;
+
orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
if (!orig_buf)
@@ -920,7 +926,7 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
goto free_buf;
/* We are interested only in the decrypted data not the auth */
- err = decrypt_skb(sk, skb, sg);
+ err = decrypt_skb(sk, sg);
if (err != -EBADMSG)
goto free_buf;
else
@@ -975,10 +981,12 @@ free_buf:
return err;
}
-int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
- struct sk_buff *skb, struct strp_msg *rxm)
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
+ struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
+ struct sk_buff *skb = tls_strp_msg(sw_ctx);
+ struct strp_msg *rxm = strp_msg(skb);
int is_decrypted = skb->decrypted;
int is_encrypted = !is_decrypted;
struct sk_buff *skb_iter;
@@ -1001,7 +1009,7 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
* likely have initial fragments decrypted, and final ones not
* decrypted. We need to reencrypt that single SKB.
*/
- return tls_device_reencrypt(sk, skb);
+ return tls_device_reencrypt(sk, sw_ctx);
}
/* Return immediately if the record is either entirely plaintext or
@@ -1018,7 +1026,7 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
}
ctx->resync_nh_reset = 1;
- return tls_device_reencrypt(sk, skb);
+ return tls_device_reencrypt(sk, sw_ctx);
}
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index e40bedd112b6..618cee704217 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -34,6 +34,8 @@
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>
+#include "tls.h"
+
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
struct scatterlist *src = walk->sg;
@@ -232,7 +234,7 @@ static int fill_sg_in(struct scatterlist *sg_in,
s32 *sync_size,
int *resync_sgs)
{
- int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset;
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
struct tls_record_info *record;
@@ -310,8 +312,8 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
struct sk_buff *skb,
s32 sync_size, u64 rcd_sn)
{
- int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset;
void *buf, *iv, *aad, *dummy_buf;
struct aead_request *aead_req;
@@ -372,7 +374,7 @@ free_nskb:
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
- int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int payload_len = skb->len - tcp_payload_offset;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index d80ab3d1764e..9703636cfc60 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -45,6 +45,8 @@
#include <net/tls.h>
#include <net/tls_toe.h>
+#include "tls.h"
+
MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
@@ -164,8 +166,8 @@ static int tls_handle_open_record(struct sock *sk, int flags)
return 0;
}
-int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
- unsigned char *record_type)
+int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type)
{
struct cmsghdr *cmsg;
int rc = -EINVAL;
@@ -533,6 +535,36 @@ static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
return 0;
}
+static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
+ int __user *optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ int value, len;
+
+ if (ctx->prot_info.version != TLS_1_3_VERSION)
+ return -EINVAL;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+ if (len < sizeof(value))
+ return -EINVAL;
+
+ lock_sock(sk);
+ value = -EINVAL;
+ if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
+ value = ctx->rx_no_pad;
+ release_sock(sk);
+ if (value < 0)
+ return value;
+
+ if (put_user(sizeof(value), optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &value, sizeof(value)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int do_tls_getsockopt(struct sock *sk, int optname,
char __user *optval, int __user *optlen)
{
@@ -547,6 +579,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
case TLS_TX_ZEROCOPY_RO:
rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
break;
+ case TLS_RX_EXPECT_NO_PAD:
+ rc = do_tls_getsockopt_no_pad(sk, optval, optlen);
+ break;
default:
rc = -ENOPROTOOPT;
break;
@@ -718,6 +753,38 @@ static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
return 0;
}
+static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ u32 val;
+ int rc;
+
+ if (ctx->prot_info.version != TLS_1_3_VERSION ||
+ sockptr_is_null(optval) || optlen < sizeof(val))
+ return -EINVAL;
+
+ rc = copy_from_sockptr(&val, optval, sizeof(val));
+ if (rc)
+ return -EFAULT;
+ if (val > 1)
+ return -EINVAL;
+ rc = check_zeroed_sockptr(optval, sizeof(val), optlen - sizeof(val));
+ if (rc < 1)
+ return rc == 0 ? -EINVAL : rc;
+
+ lock_sock(sk);
+ rc = -EINVAL;
+ if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) {
+ ctx->rx_no_pad = val;
+ tls_update_rx_zc_capable(ctx);
+ rc = 0;
+ }
+ release_sock(sk);
+
+ return rc;
+}
+
static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen)
{
@@ -736,6 +803,9 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
release_sock(sk);
break;
+ case TLS_RX_EXPECT_NO_PAD:
+ rc = do_tls_setsockopt_no_pad(sk, optval, optlen);
+ break;
default:
rc = -ENOPROTOOPT;
break;
@@ -934,6 +1004,23 @@ static void tls_update(struct sock *sk, struct proto *p,
}
}
+static u16 tls_user_config(struct tls_context *ctx, bool tx)
+{
+ u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
+
+ switch (config) {
+ case TLS_BASE:
+ return TLS_CONF_BASE;
+ case TLS_SW:
+ return TLS_CONF_SW;
+ case TLS_HW:
+ return TLS_CONF_HW;
+ case TLS_HW_RECORD:
+ return TLS_CONF_HW_RECORD;
+ }
+ return 0;
+}
+
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
u16 version, cipher_type;
@@ -976,6 +1063,11 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
if (err)
goto nla_failure;
}
+ if (ctx->rx_no_pad) {
+ err = nla_put_flag(skb, TLS_INFO_RX_NO_PAD);
+ if (err)
+ goto nla_failure;
+ }
rcu_read_unlock();
nla_nest_end(skb, start);
@@ -997,6 +1089,7 @@ static size_t tls_get_info_size(const struct sock *sk)
nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */
nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */
nla_total_size(0) + /* TLS_INFO_ZC_RO_TX */
+ nla_total_size(0) + /* TLS_INFO_RX_NO_PAD */
0;
return size;
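
The TLS_RX_EXPECT_NO_PAD option added above is only accepted once a TLS 1.3 RX context is configured (SW or HW receive) and simply latches ctx->rx_no_pad, which tls_update_rx_zc_capable() consumes. A hedged user-space sketch of opting in; SOL_TLS and TLS_RX_EXPECT_NO_PAD are expected to come from the updated uapi <linux/tls.h>, and the fallback define is only for older headers:

        #include <sys/socket.h>
        #include <linux/tls.h>

        #ifndef SOL_TLS
        #define SOL_TLS 282             /* fallback for older libc headers */
        #endif

        static int tls_rx_expect_no_pad(int sk)
        {
                int one = 1;

                /* requires a TLS 1.3 RX context already installed on sk */
                return setsockopt(sk, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
                                  &one, sizeof(one));
        }
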
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
index feeceb0e4cb4..68982728f620 100644
--- a/net/tls/tls_proc.c
+++ b/net/tls/tls_proc.c
@@ -6,6 +6,8 @@
#include <net/snmp.h>
#include <net/tls.h>
+#include "tls.h"
+
#ifdef CONFIG_PROC_FS
static const struct snmp_mib tls_mib_list[] = {
SNMP_MIB_ITEM("TlsCurrTxSw", LINUX_MIB_TLSCURRTXSW),
@@ -18,6 +20,8 @@ static const struct snmp_mib tls_mib_list[] = {
SNMP_MIB_ITEM("TlsRxDevice", LINUX_MIB_TLSRXDEVICE),
SNMP_MIB_ITEM("TlsDecryptError", LINUX_MIB_TLSDECRYPTERROR),
SNMP_MIB_ITEM("TlsRxDeviceResync", LINUX_MIB_TLSRXDEVICERESYNC),
+ SNMP_MIB_ITEM("TlsDecryptRetry", LINUX_MIB_TLSDECRYPTRETRY),
+ SNMP_MIB_ITEM("TlsRxNoPadViolation", LINUX_MIB_TLSRXNOPADVIOL),
SNMP_MIB_SENTINEL
};
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
new file mode 100644
index 000000000000..9ccab79a6e1e
--- /dev/null
+++ b/net/tls/tls_strp.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/skbuff.h>
+
+#include "tls.h"
+
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *dst)
+{
+ struct sk_buff *clone;
+
+ clone = skb_clone(skb, sk->sk_allocation);
+ if (!clone)
+ return -ENOMEM;
+ __skb_queue_tail(dst, clone);
+ return 0;
+}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index e30649f6dde5..859ea02022c0 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -44,9 +44,23 @@
#include <net/strparser.h>
#include <net/tls.h>
+#include "tls.h"
+
struct tls_decrypt_arg {
+ struct_group(inargs,
bool zc;
bool async;
+ u8 tail;
+ );
+
+ struct sk_buff *skb;
+};
+
+struct tls_decrypt_ctx {
+ u8 iv[MAX_IV_SIZE];
+ u8 aad[TLS_MAX_AAD_SIZE];
+ u8 tail;
+ struct scatterlist sg[];
};
noinline void tls_err_abort(struct sock *sk, int err)
@@ -133,7 +147,8 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
return __skb_nsg(skb, offset, len, 0);
}
-static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
+static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
+ struct tls_decrypt_arg *darg)
{
struct strp_msg *rxm = strp_msg(skb);
struct tls_msg *tlm = tls_msg(skb);
@@ -142,7 +157,7 @@ static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
/* Determine zero-padding length */
if (prot->version == TLS_1_3_VERSION) {
int offset = rxm->full_len - TLS_TAG_SIZE - 1;
- char content_type = 0;
+ char content_type = darg->zc ? darg->tail : 0;
int err;
while (content_type == 0) {
@@ -169,39 +184,22 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
struct scatterlist *sgin = aead_req->src;
struct tls_sw_context_rx *ctx;
struct tls_context *tls_ctx;
- struct tls_prot_info *prot;
struct scatterlist *sg;
- struct sk_buff *skb;
unsigned int pages;
+ struct sock *sk;
- skb = (struct sk_buff *)req->data;
- tls_ctx = tls_get_ctx(skb->sk);
+ sk = (struct sock *)req->data;
+ tls_ctx = tls_get_ctx(sk);
ctx = tls_sw_ctx_rx(tls_ctx);
- prot = &tls_ctx->prot_info;
/* Propagate if there was an err */
if (err) {
if (err == -EBADMSG)
- TLS_INC_STATS(sock_net(skb->sk),
- LINUX_MIB_TLSDECRYPTERROR);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
ctx->async_wait.err = err;
- tls_err_abort(skb->sk, err);
- } else {
- struct strp_msg *rxm = strp_msg(skb);
-
- /* No TLS 1.3 support with async crypto */
- WARN_ON(prot->tail_size);
-
- rxm->offset += prot->prepend_size;
- rxm->full_len -= prot->overhead_size;
+ tls_err_abort(sk, err);
}
- /* After using skb->sk to propagate sk through crypto async callback
- * we need to NULL it again.
- */
- skb->sk = NULL;
-
-
/* Free the destination pages if skb was not decrypted inplace */
if (sgout != sgin) {
/* Skip the first S/G entry as it points to AAD */
@@ -221,7 +219,6 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
}
static int tls_do_decryption(struct sock *sk,
- struct sk_buff *skb,
struct scatterlist *sgin,
struct scatterlist *sgout,
char *iv_recv,
@@ -241,16 +238,9 @@ static int tls_do_decryption(struct sock *sk,
(u8 *)iv_recv);
if (darg->async) {
- /* Using skb->sk to push sk through to crypto async callback
- * handler. This allows propagating errors up to the socket
- * if needed. It _must_ be cleared in the async handler
- * before consume_skb is called. We _know_ skb->sk is NULL
- * because it is a clone from strparser.
- */
- skb->sk = sk;
aead_request_set_callback(aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- tls_decrypt_done, skb);
+ tls_decrypt_done, sk);
atomic_inc(&ctx->decrypt_pending);
} else {
aead_request_set_callback(aead_req,
@@ -515,7 +505,8 @@ static int tls_do_encryption(struct sock *sk,
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
prot->iv_size + prot->salt_size);
- xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
+ tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
+ tls_ctx->tx.rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
@@ -952,7 +943,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
lock_sock(sk);
if (unlikely(msg->msg_controllen)) {
- ret = tls_proccess_cmsg(sk, msg, &record_type);
+ ret = tls_process_cmsg(sk, msg, &record_type);
if (ret) {
if (ret == -EINPROGRESS)
num_async++;
@@ -1290,54 +1281,50 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
return ret;
}
-static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
- bool nonblock, long timeo, int *err)
+static int
+tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ long timeo)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- struct sk_buff *skb;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
- if (sk->sk_err) {
- *err = sock_error(sk);
- return NULL;
- }
+ while (!ctx->recv_pkt) {
+ if (!sk_psock_queue_empty(psock))
+ return 0;
+
+ if (sk->sk_err)
+ return sock_error(sk);
if (!skb_queue_empty(&sk->sk_receive_queue)) {
__strp_unpause(&ctx->strp);
if (ctx->recv_pkt)
- return ctx->recv_pkt;
+ break;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
- return NULL;
+ return 0;
if (sock_flag(sk, SOCK_DONE))
- return NULL;
+ return 0;
- if (nonblock || !timeo) {
- *err = -EAGAIN;
- return NULL;
- }
+ if (nonblock || !timeo)
+ return -EAGAIN;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, &timeo,
- ctx->recv_pkt != skb ||
- !sk_psock_queue_empty(psock),
+ ctx->recv_pkt || !sk_psock_queue_empty(psock),
&wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
/* Handle signals */
- if (signal_pending(current)) {
- *err = sock_intr_errno(timeo);
- return NULL;
- }
+ if (signal_pending(current))
+ return sock_intr_errno(timeo);
}
- return skb;
+ return 1;
}
static int tls_setup_from_iter(struct iov_iter *from,
@@ -1396,84 +1383,120 @@ out:
return rc;
}
+static struct sk_buff *
+tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
+ unsigned int full_len)
+{
+ struct strp_msg *clr_rxm;
+ struct sk_buff *clr_skb;
+ int err;
+
+ clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
+ &err, sk->sk_allocation);
+ if (!clr_skb)
+ return NULL;
+
+ skb_copy_header(clr_skb, skb);
+ clr_skb->len = full_len;
+ clr_skb->data_len = full_len;
+
+ clr_rxm = strp_msg(clr_skb);
+ clr_rxm->offset = 0;
+
+ return clr_skb;
+}
+
+/* Decrypt handlers
+ *
+ * tls_decrypt_sg() and tls_decrypt_device() are decrypt handlers.
+ * They must transform the darg in/out argument as follows:
+ * | Input | Output
+ * -------------------------------------------------------------------
+ * zc | Zero-copy decrypt allowed | Zero-copy performed
+ * async | Async decrypt allowed | Async crypto used / in progress
+ * skb | * | Output skb
+ */
+
/* This function decrypts the input skb into either out_iov or in out_sg
- * or in skb buffers itself. The input parameter 'zc' indicates if
+ * or in skb buffers itself. The input parameter 'darg->zc' indicates if
* zero-copy mode needs to be tried or not. With zero-copy mode, either
* out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
* NULL, then the decryption happens inside skb buffers itself, i.e.
- * zero-copy gets disabled and 'zc' is updated.
+ * zero-copy gets disabled and 'darg->zc' is updated.
*/
-
-static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *out_iov,
- struct scatterlist *out_sg,
- struct tls_decrypt_arg *darg)
+static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ struct scatterlist *out_sg,
+ struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
- struct strp_msg *rxm = strp_msg(skb);
- struct tls_msg *tlm = tls_msg(skb);
- int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
+ int n_sgin, n_sgout, aead_size, err, pages = 0;
+ struct sk_buff *skb = tls_strp_msg(ctx);
+ const struct strp_msg *rxm = strp_msg(skb);
+ const struct tls_msg *tlm = tls_msg(skb);
struct aead_request *aead_req;
- struct sk_buff *unused;
- u8 *aad, *iv, *mem = NULL;
struct scatterlist *sgin = NULL;
struct scatterlist *sgout = NULL;
- const int data_len = rxm->full_len - prot->overhead_size +
- prot->tail_size;
+ const int data_len = rxm->full_len - prot->overhead_size;
+ int tail_pages = !!prot->tail_size;
+ struct tls_decrypt_ctx *dctx;
+ struct sk_buff *clear_skb;
int iv_offset = 0;
+ u8 *mem;
+
+ n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
+ rxm->full_len - prot->prepend_size);
+ if (n_sgin < 1)
+ return n_sgin ?: -EBADMSG;
if (darg->zc && (out_iov || out_sg)) {
+ clear_skb = NULL;
+
if (out_iov)
- n_sgout = 1 +
+ n_sgout = 1 + tail_pages +
iov_iter_npages_cap(out_iov, INT_MAX, data_len);
else
n_sgout = sg_nents(out_sg);
- n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
- rxm->full_len - prot->prepend_size);
} else {
- n_sgout = 0;
darg->zc = false;
- n_sgin = skb_cow_data(skb, 0, &unused);
- }
- if (n_sgin < 1)
- return -EBADMSG;
+ clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
+ if (!clear_skb)
+ return -ENOMEM;
+
+ n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
+ }
/* Increment to accommodate AAD */
n_sgin = n_sgin + 1;
- nsg = n_sgin + n_sgout;
-
- aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
- mem_size = aead_size + (nsg * sizeof(struct scatterlist));
- mem_size = mem_size + prot->aad_size;
- mem_size = mem_size + MAX_IV_SIZE;
-
/* Allocate a single block of memory which contains
- * aead_req || sgin[] || sgout[] || aad || iv.
- * This order achieves correct alignment for aead_req, sgin, sgout.
+ * aead_req || tls_decrypt_ctx.
+ * Both structs are variable length.
*/
- mem = kmalloc(mem_size, sk->sk_allocation);
- if (!mem)
- return -ENOMEM;
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+ mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
+ sk->sk_allocation);
+ if (!mem) {
+ err = -ENOMEM;
+ goto exit_free_skb;
+ }
/* Segment the allocated memory */
aead_req = (struct aead_request *)mem;
- sgin = (struct scatterlist *)(mem + aead_size);
- sgout = sgin + n_sgin;
- aad = (u8 *)(sgout + n_sgout);
- iv = aad + prot->aad_size;
+ dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
+ sgin = &dctx->sg[0];
+ sgout = &dctx->sg[n_sgin];
/* For CCM based ciphers, first byte of nonce+iv is a constant */
switch (prot->cipher_type) {
case TLS_CIPHER_AES_CCM_128:
- iv[0] = TLS_AES_CCM_IV_B0_BYTE;
+ dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
break;
case TLS_CIPHER_SM4_CCM:
- iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
iv_offset = 1;
break;
}
@@ -1481,130 +1504,167 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
/* Prepare IV */
if (prot->version == TLS_1_3_VERSION ||
prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
- memcpy(iv + iv_offset, tls_ctx->rx.iv,
+ memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
prot->iv_size + prot->salt_size);
} else {
err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
- iv + iv_offset + prot->salt_size,
+ &dctx->iv[iv_offset] + prot->salt_size,
prot->iv_size);
- if (err < 0) {
- kfree(mem);
- return err;
- }
- memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
+ if (err < 0)
+ goto exit_free;
+ memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
}
- xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
+ tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
/* Prepare AAD */
- tls_make_aad(aad, rxm->full_len - prot->overhead_size +
+ tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
prot->tail_size,
tls_ctx->rx.rec_seq, tlm->control, prot);
/* Prepare sgin */
sg_init_table(sgin, n_sgin);
- sg_set_buf(&sgin[0], aad, prot->aad_size);
+ sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
err = skb_to_sgvec(skb, &sgin[1],
rxm->offset + prot->prepend_size,
rxm->full_len - prot->prepend_size);
- if (err < 0) {
- kfree(mem);
- return err;
- }
+ if (err < 0)
+ goto exit_free;
- if (n_sgout) {
- if (out_iov) {
- sg_init_table(sgout, n_sgout);
- sg_set_buf(&sgout[0], aad, prot->aad_size);
+ if (clear_skb) {
+ sg_init_table(sgout, n_sgout);
+ sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
- err = tls_setup_from_iter(out_iov, data_len,
- &pages, &sgout[1],
- (n_sgout - 1));
- if (err < 0)
- goto fallback_to_reg_recv;
- } else if (out_sg) {
- memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
- } else {
- goto fallback_to_reg_recv;
+ err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
+ data_len + prot->tail_size);
+ if (err < 0)
+ goto exit_free;
+ } else if (out_iov) {
+ sg_init_table(sgout, n_sgout);
+ sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
+
+ err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
+ (n_sgout - 1 - tail_pages));
+ if (err < 0)
+ goto exit_free_pages;
+
+ if (prot->tail_size) {
+ sg_unmark_end(&sgout[pages]);
+ sg_set_buf(&sgout[pages + 1], &dctx->tail,
+ prot->tail_size);
+ sg_mark_end(&sgout[pages + 1]);
}
- } else {
-fallback_to_reg_recv:
- sgout = sgin;
- pages = 0;
- darg->zc = false;
+ } else if (out_sg) {
+ memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
}
/* Prepare and submit AEAD request */
- err = tls_do_decryption(sk, skb, sgin, sgout, iv,
- data_len, aead_req, darg);
- if (darg->async)
- return 0;
+ err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
+ data_len + prot->tail_size, aead_req, darg);
+ if (err)
+ goto exit_free_pages;
+
+ darg->skb = clear_skb ?: tls_strp_msg(ctx);
+ clear_skb = NULL;
+ if (unlikely(darg->async)) {
+ err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
+ if (err)
+ __skb_queue_tail(&ctx->async_hold, darg->skb);
+ return err;
+ }
+
+ if (prot->tail_size)
+ darg->tail = dctx->tail;
+
+exit_free_pages:
/* Release the pages in case iov was mapped to pages */
for (; pages > 0; pages--)
put_page(sg_page(&sgout[pages]));
-
+exit_free:
kfree(mem);
+exit_free_skb:
+ consume_skb(clear_skb);
return err;
}
-static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *dest,
- struct tls_decrypt_arg *darg)
+static int
+tls_decrypt_device(struct sock *sk, struct tls_context *tls_ctx,
+ struct tls_decrypt_arg *darg)
+{
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ int err;
+
+ if (tls_ctx->rx_conf != TLS_HW)
+ return 0;
+
+ err = tls_device_decrypted(sk, tls_ctx);
+ if (err <= 0)
+ return err;
+
+ darg->zc = false;
+ darg->async = false;
+ darg->skb = tls_strp_msg(ctx);
+ ctx->recv_pkt = NULL;
+ return 1;
+}
+
+static int tls_rx_one_record(struct sock *sk, struct iov_iter *dest,
+ struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
- struct strp_msg *rxm = strp_msg(skb);
- struct tls_msg *tlm = tls_msg(skb);
+ struct strp_msg *rxm;
int pad, err;
- if (tlm->decrypted) {
- darg->zc = false;
- darg->async = false;
- return 0;
- }
-
- if (tls_ctx->rx_conf == TLS_HW) {
- err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
- if (err < 0)
- return err;
- if (err > 0) {
- tlm->decrypted = 1;
- darg->zc = false;
- darg->async = false;
- goto decrypt_done;
- }
- }
+ err = tls_decrypt_device(sk, tls_ctx, darg);
+ if (err < 0)
+ return err;
+ if (err)
+ goto decrypt_done;
- err = decrypt_internal(sk, skb, dest, NULL, darg);
+ err = tls_decrypt_sg(sk, dest, NULL, darg);
if (err < 0) {
if (err == -EBADMSG)
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
return err;
}
if (darg->async)
- goto decrypt_next;
+ goto decrypt_done;
+ /* If opportunistic TLS 1.3 ZC failed retry without ZC */
+ if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
+ darg->tail != TLS_RECORD_TYPE_DATA)) {
+ darg->zc = false;
+ if (!darg->tail)
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
+ return tls_rx_one_record(sk, dest, darg);
+ }
decrypt_done:
- pad = padding_length(prot, skb);
- if (pad < 0)
+ if (darg->skb == ctx->recv_pkt)
+ ctx->recv_pkt = NULL;
+
+ pad = tls_padding_length(prot, darg->skb, darg);
+ if (pad < 0) {
+ consume_skb(darg->skb);
return pad;
+ }
+ rxm = strp_msg(darg->skb);
rxm->full_len -= pad;
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
- tlm->decrypted = 1;
-decrypt_next:
tls_advance_record_sn(sk, prot, &tls_ctx->rx);
return 0;
}
-int decrypt_skb(struct sock *sk, struct sk_buff *skb,
- struct scatterlist *sgout)
+int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
struct tls_decrypt_arg darg = { .zc = true, };
- return decrypt_internal(sk, skb, NULL, sgout, &darg);
+ return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
@@ -1630,6 +1690,13 @@ static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
return 1;
}
+static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
+{
+ consume_skb(ctx->recv_pkt);
+ ctx->recv_pkt = NULL;
+ __strp_unpause(&ctx->strp);
+}
+
/* This function traverses the rx_list in the TLS receive context to copy the
 * decrypted records into the buffer provided by the caller when zero copy is
 * not true. Further, the records are removed from the rx_list if it is not a peek
@@ -1640,7 +1707,6 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
u8 *control,
size_t skip,
size_t len,
- bool zc,
bool is_peek)
{
struct sk_buff *skb = skb_peek(&ctx->rx_list);
@@ -1674,12 +1740,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
if (err <= 0)
goto out;
- if (!zc || (rxm->full_len - skip) > len) {
- err = skb_copy_datagram_msg(skb, rxm->offset + skip,
- msg, chunk);
- if (err < 0)
- goto out;
- }
+ err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ msg, chunk);
+ if (err < 0)
+ goto out;
len = len - chunk;
copied = copied + chunk;
@@ -1717,6 +1781,69 @@ out:
return copied ? : err;
}
+static void
+tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
+ size_t len_left, size_t decrypted, ssize_t done,
+ size_t *flushed_at)
+{
+ size_t max_rec;
+
+ if (len_left <= decrypted)
+ return;
+
+ max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
+ if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
+ return;
+
+ *flushed_at = done;
+ sk_flush_backlog(sk);
+}
+
+static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+ bool nonblock)
+{
+ long timeo;
+
+ lock_sock(sk);
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+
+ while (unlikely(ctx->reader_present)) {
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ ctx->reader_contended = 1;
+
+ add_wait_queue(&ctx->wq, &wait);
+ sk_wait_event(sk, &timeo,
+ !READ_ONCE(ctx->reader_present), &wait);
+ remove_wait_queue(&ctx->wq, &wait);
+
+ if (!timeo)
+ return -EAGAIN;
+ if (signal_pending(current))
+ return sock_intr_errno(timeo);
+ }
+
+ WRITE_ONCE(ctx->reader_present, 1);
+
+ return timeo;
+}
+
+static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+{
+ if (unlikely(ctx->reader_contended)) {
+ if (wq_has_sleeper(&ctx->wq))
+ wake_up(&ctx->wq);
+ else
+ ctx->reader_contended = 0;
+
+ WARN_ON_ONCE(!ctx->reader_present);
+ }
+
+ WRITE_ONCE(ctx->reader_present, 0);
+ release_sock(sk);
+}
+
int tls_sw_recvmsg(struct sock *sk,
struct msghdr *msg,
size_t len,
@@ -1726,9 +1853,10 @@ int tls_sw_recvmsg(struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
+ ssize_t decrypted = 0, async_copy_bytes = 0;
struct sk_psock *psock;
unsigned char control = 0;
- ssize_t decrypted = 0;
+ size_t flushed_at = 0;
struct strp_msg *rxm;
struct tls_msg *tlm;
struct sk_buff *skb;
@@ -1745,7 +1873,9 @@ int tls_sw_recvmsg(struct sock *sk,
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
psock = sk_psock_get(sk);
- lock_sock(sk);
+ timeo = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
+ if (timeo < 0)
+ return timeo;
bpf_strp_enabled = sk_psock_strp_enabled(psock);
/* If crypto failed the connection is broken */
@@ -1754,7 +1884,7 @@ int tls_sw_recvmsg(struct sock *sk,
goto end;
/* Process pending decrypted records. It must be non-zero-copy */
- err = process_rx_list(ctx, msg, &control, 0, len, false, is_peek);
+ err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
if (err < 0)
goto end;
@@ -1764,28 +1894,32 @@ int tls_sw_recvmsg(struct sock *sk,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
len = len - copied;
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
- prot->version != TLS_1_3_VERSION;
+ ctx->zc_capable;
decrypted = 0;
while (len && (decrypted + copied < target || ctx->recv_pkt)) {
- struct tls_decrypt_arg darg = {};
+ struct tls_decrypt_arg darg;
int to_decrypt, chunk;
- skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
- if (!skb) {
+ err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, timeo);
+ if (err <= 0) {
if (psock) {
chunk = sk_msg_recvmsg(sk, psock, msg, len,
flags);
- if (chunk > 0)
- goto leave_on_list;
+ if (chunk > 0) {
+ decrypted += chunk;
+ len -= chunk;
+ continue;
+ }
}
goto recv_end;
}
- rxm = strp_msg(skb);
- tlm = tls_msg(skb);
+ memset(&darg.inargs, 0, sizeof(darg.inargs));
+
+ rxm = strp_msg(ctx->recv_pkt);
+ tlm = tls_msg(ctx->recv_pkt);
to_decrypt = rxm->full_len - prot->overhead_size;
@@ -1799,12 +1933,16 @@ int tls_sw_recvmsg(struct sock *sk,
else
darg.async = false;
- err = decrypt_skb_update(sk, skb, &msg->msg_iter, &darg);
+ err = tls_rx_one_record(sk, &msg->msg_iter, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto recv_end;
}
+ skb = darg.skb;
+ rxm = strp_msg(skb);
+ tlm = tls_msg(skb);
+
async |= darg.async;
/* If the type of records being processed is not known yet,
@@ -1815,30 +1953,36 @@ int tls_sw_recvmsg(struct sock *sk,
* For tls1.3, we disable async.
*/
err = tls_record_content_type(msg, tlm, &control);
- if (err <= 0)
+ if (err <= 0) {
+ tls_rx_rec_done(ctx);
+put_on_rx_list_err:
+ __skb_queue_tail(&ctx->rx_list, skb);
goto recv_end;
-
- ctx->recv_pkt = NULL;
- __strp_unpause(&ctx->strp);
- __skb_queue_tail(&ctx->rx_list, skb);
-
- if (async) {
- /* TLS 1.2-only, to_decrypt must be text length */
- chunk = min_t(int, to_decrypt, len);
-leave_on_list:
- decrypted += chunk;
- len -= chunk;
- continue;
}
+
+ /* periodically flush backlog, and feed strparser */
+ tls_read_flush_backlog(sk, prot, len, to_decrypt,
+ decrypted + copied, &flushed_at);
+
/* TLS 1.3 may have updated the length by more than overhead */
chunk = rxm->full_len;
+ tls_rx_rec_done(ctx);
if (!darg.zc) {
bool partially_consumed = chunk > len;
+ if (async) {
+ /* TLS 1.2-only, to_decrypt must be text len */
+ chunk = min_t(int, to_decrypt, len);
+ async_copy_bytes += chunk;
+put_on_rx_list:
+ decrypted += chunk;
+ len -= chunk;
+ __skb_queue_tail(&ctx->rx_list, skb);
+ continue;
+ }
+
if (bpf_strp_enabled) {
- /* BPF may try to queue the skb */
- __skb_unlink(skb, &ctx->rx_list);
err = sk_psock_tls_strp_read(psock, skb);
if (err != __SK_PASS) {
rxm->offset = rxm->offset + rxm->full_len;
@@ -1847,7 +1991,6 @@ leave_on_list:
consume_skb(skb);
continue;
}
- __skb_queue_tail(&ctx->rx_list, skb);
}
if (partially_consumed)
@@ -1856,22 +1999,21 @@ leave_on_list:
err = skb_copy_datagram_msg(skb, rxm->offset,
msg, chunk);
if (err < 0)
- goto recv_end;
+ goto put_on_rx_list_err;
if (is_peek)
- goto leave_on_list;
+ goto put_on_rx_list;
if (partially_consumed) {
rxm->offset += chunk;
rxm->full_len -= chunk;
- goto leave_on_list;
+ goto put_on_rx_list;
}
}
decrypted += chunk;
len -= chunk;
- __skb_unlink(skb, &ctx->rx_list);
consume_skb(skb);
/* Return full control message to userspace before trying
@@ -1891,30 +2033,32 @@ recv_end:
reinit_completion(&ctx->async_wait.completion);
pending = atomic_read(&ctx->decrypt_pending);
spin_unlock_bh(&ctx->decrypt_compl_lock);
- if (pending) {
+ ret = 0;
+ if (pending)
ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
- if (ret) {
- if (err >= 0 || err == -EINPROGRESS)
- err = ret;
- decrypted = 0;
- goto end;
- }
+ __skb_queue_purge(&ctx->async_hold);
+
+ if (ret) {
+ if (err >= 0 || err == -EINPROGRESS)
+ err = ret;
+ decrypted = 0;
+ goto end;
}
/* Drain records from the rx_list & copy if required */
if (is_peek || is_kvec)
err = process_rx_list(ctx, msg, &control, copied,
- decrypted, false, is_peek);
+ decrypted, is_peek);
else
err = process_rx_list(ctx, msg, &control, 0,
- decrypted, true, is_peek);
+ async_copy_bytes, is_peek);
decrypted = max(err, 0);
}
copied += decrypted;
end:
- release_sock(sk);
+ tls_rx_reader_unlock(sk, ctx);
if (psock)
sk_psock_put(sk, psock);
return copied ? : err;
@@ -1931,31 +2075,34 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct tls_msg *tlm;
struct sk_buff *skb;
ssize_t copied = 0;
- bool from_queue;
int err = 0;
long timeo;
int chunk;
- lock_sock(sk);
-
- timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
+ timeo = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
+ if (timeo < 0)
+ return timeo;
- from_queue = !skb_queue_empty(&ctx->rx_list);
- if (from_queue) {
+ if (!skb_queue_empty(&ctx->rx_list)) {
skb = __skb_dequeue(&ctx->rx_list);
} else {
- struct tls_decrypt_arg darg = {};
+ struct tls_decrypt_arg darg;
- skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
- &err);
- if (!skb)
+ err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
+ timeo);
+ if (err <= 0)
goto splice_read_end;
- err = decrypt_skb_update(sk, skb, NULL, &darg);
+ memset(&darg.inargs, 0, sizeof(darg.inargs));
+
+ err = tls_rx_one_record(sk, NULL, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto splice_read_end;
}
+
+ tls_rx_rec_done(ctx);
+ skb = darg.skb;
}
rxm = strp_msg(skb);
@@ -1964,29 +2111,29 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
/* splice does not support reading control messages */
if (tlm->control != TLS_RECORD_TYPE_DATA) {
err = -EINVAL;
- goto splice_read_end;
+ goto splice_requeue;
}
chunk = min_t(unsigned int, rxm->full_len, len);
copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
if (copied < 0)
- goto splice_read_end;
+ goto splice_requeue;
- if (!from_queue) {
- ctx->recv_pkt = NULL;
- __strp_unpause(&ctx->strp);
- }
if (chunk < rxm->full_len) {
- __skb_queue_head(&ctx->rx_list, skb);
rxm->offset += len;
rxm->full_len -= len;
- } else {
- consume_skb(skb);
+ goto splice_requeue;
}
+ consume_skb(skb);
+
splice_read_end:
- release_sock(sk);
+ tls_rx_reader_unlock(sk, ctx);
return copied ? : err;
+
+splice_requeue:
+ __skb_queue_head(&ctx->rx_list, skb);
+ goto splice_read_end;
}
bool tls_sw_sock_is_readable(struct sock *sk)
@@ -2032,7 +2179,6 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
if (ret < 0)
goto read_failure;
- tlm->decrypted = 0;
tlm->control = header[0];
data_len = ((header[4] & 0xFF) | (header[3] << 8));
@@ -2227,12 +2373,23 @@ static void tx_work_handler(struct work_struct *work)
mutex_unlock(&tls_ctx->tx_lock);
}
+static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
+{
+ struct tls_rec *rec;
+
+ rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
+ if (!rec)
+ return false;
+
+ return READ_ONCE(rec->tx_ready);
+}
+
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
/* Schedule the transmission if tx list is ready */
- if (is_tx_ready(tx_ctx) &&
+ if (tls_is_tx_ready(tx_ctx) &&
!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
@@ -2249,6 +2406,14 @@ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
strp_check_rcv(&rx_ctx->strp);
}
+void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
+
+ rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
+ tls_ctx->prot_info.version != TLS_1_3_VERSION;
+}
+
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2308,9 +2473,11 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
} else {
crypto_init_wait(&sw_ctx_rx->async_wait);
spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+ init_waitqueue_head(&sw_ctx_rx->wq);
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
skb_queue_head_init(&sw_ctx_rx->rx_list);
+ skb_queue_head_init(&sw_ctx_rx->async_hold);
aead = &sw_ctx_rx->aead_recv;
}
@@ -2422,13 +2589,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_priv;
}
- /* Sanity-check the sizes for stack allocations. */
- if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
- rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE) {
- rc = -EINVAL;
- goto free_priv;
- }
-
if (crypto_info->version == TLS_1_3_VERSION) {
nonce_size = 0;
prot->aad_size = TLS_HEADER_SIZE;
@@ -2438,6 +2598,14 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
prot->tail_size = 0;
}
+ /* Sanity-check the sizes for stack allocations. */
+ if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
+ rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
+ prot->aad_size > TLS_MAX_AAD_SIZE) {
+ rc = -EINVAL;
+ goto free_priv;
+ }
+
prot->version = crypto_info->version;
prot->cipher_type = crypto_info->cipher_type;
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
@@ -2484,12 +2652,10 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
if (sw_ctx_rx) {
tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
- if (crypto_info->version == TLS_1_3_VERSION)
- sw_ctx_rx->async_capable = 0;
- else
- sw_ctx_rx->async_capable =
- !!(tfm->__crt_alg->cra_flags &
- CRYPTO_ALG_ASYNC);
+ tls_update_rx_zc_capable(ctx);
+ sw_ctx_rx->async_capable =
+ crypto_info->version != TLS_1_3_VERSION &&
+ !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
/* Set up strparser */
memset(&cb, 0, sizeof(cb));
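
The new tls_rx_reader_lock()/tls_rx_reader_unlock() helpers above serialise readers on the TLS receive path: only one reader may own the RX context at a time, and the unlock path only pays for a wake-up when another reader has flagged the lock as contended. Below is a minimal userspace sketch of the same pattern, using a pthread mutex and condition variable in place of the socket lock and wait queue; all names are illustrative, and it deliberately omits the timeout and signal handling the kernel code must perform.

#include <pthread.h>
#include <stdbool.h>

/* Illustrative analogue of the RX reader lock: one reader at a time,
 * with a "contended" flag so the uncontended unlock never touches the
 * wait queue. Not a kernel API.
 */
struct rx_reader_lock {
	pthread_mutex_t lock;   /* stands in for lock_sock()        */
	pthread_cond_t  wq;     /* stands in for ctx->wq            */
	bool reader_present;    /* a reader currently owns the ctx  */
	bool reader_contended;  /* someone had to sleep for it      */
};

static struct rx_reader_lock ctx = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wq   = PTHREAD_COND_INITIALIZER,
};

static void rx_reader_lock(struct rx_reader_lock *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->reader_present) {
		/* slow path: announce contention so the owner wakes us */
		c->reader_contended = true;
		pthread_cond_wait(&c->wq, &c->lock);
	}
	c->reader_present = true;
	pthread_mutex_unlock(&c->lock);
}

static void rx_reader_unlock(struct rx_reader_lock *c)
{
	pthread_mutex_lock(&c->lock);
	c->reader_present = false;
	if (c->reader_contended) {
		c->reader_contended = false;
		/* wake everyone; losers re-arm the flag and sleep again */
		pthread_cond_broadcast(&c->wq);
	}
	pthread_mutex_unlock(&c->lock);
}

The broadcast on unlock is a simplification of the kernel's targeted wake-up: any waiter that loses the race simply sets reader_contended again and goes back to sleep, so no wake-up is ever lost.
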
diff --git a/net/tls/tls_toe.c b/net/tls/tls_toe.c
index 7e1330f19165..825669e1ab47 100644
--- a/net/tls/tls_toe.c
+++ b/net/tls/tls_toe.c
@@ -38,6 +38,8 @@
#include <net/tls.h>
#include <net/tls_toe.h>
+#include "tls.h"
+
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
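
Back in tls_sw.c, tls_sw_recvmsg() now calls tls_read_flush_backlog() once per record. Its condition can be restated as a small predicate: keep going without a flush while the caller is about to be satisfied, and otherwise flush only when either 128K has been copied since the last flush or at most about one full record remains queued in TCP. The sketch below assumes the caller supplies tcp_inq and the protocol sizes; none of these names are kernel APIs.

#include <stdbool.h>
#include <stddef.h>

#define SZ_128K          (128 * 1024)
#define TLS_MAX_PAYLOAD  (16 * 1024)   /* maximum TLS record payload */

/* Illustrative restatement of the tls_read_flush_backlog() condition.
 * 'overhead' and 'tail' stand in for prot->overhead_size / tail_size.
 */
static bool should_flush_backlog(size_t len_left, size_t decrypted,
				 size_t done, size_t *flushed_at,
				 size_t overhead, size_t tail,
				 size_t tcp_inq)
{
	size_t max_rec = overhead - tail + TLS_MAX_PAYLOAD;

	if (len_left <= decrypted)
		return false;    /* request is about to be satisfied */
	if (done - *flushed_at < SZ_128K && tcp_inq > max_rec)
		return false;    /* not yet worth the latency hit */

	*flushed_at = done;      /* remember where we last flushed */
	return true;
}
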
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2206e6f8902d..bf338b782fc4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -118,15 +118,13 @@
#include "scm.h"
-spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE];
-EXPORT_SYMBOL_GPL(unix_table_locks);
-struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
-EXPORT_SYMBOL_GPL(unix_socket_table);
static atomic_long_t unix_nr_socks;
+static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
+static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
/* SMP locking strategy:
- * hash table is protected with spinlock unix_table_locks
- * each socket state is protected by separate spin lock.
+ * hash table is protected with spinlock.
+ * each socket state is protected by separate spinlock.
*/
static unsigned int unix_unbound_hash(struct sock *sk)
@@ -137,12 +135,12 @@ static unsigned int unix_unbound_hash(struct sock *sk)
hash ^= hash >> 8;
hash ^= sk->sk_type;
- return UNIX_HASH_SIZE + (hash & (UNIX_HASH_SIZE - 1));
+ return hash & UNIX_HASH_MOD;
}
static unsigned int unix_bsd_hash(struct inode *i)
{
- return i->i_ino & (UNIX_HASH_SIZE - 1);
+ return i->i_ino & UNIX_HASH_MOD;
}
static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
@@ -155,26 +153,34 @@ static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
hash ^= hash >> 8;
hash ^= type;
- return hash & (UNIX_HASH_SIZE - 1);
+ return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}
-static void unix_table_double_lock(unsigned int hash1, unsigned int hash2)
+static void unix_table_double_lock(struct net *net,
+ unsigned int hash1, unsigned int hash2)
{
- /* hash1 and hash2 is never the same because
- * one is between 0 and UNIX_HASH_SIZE - 1, and
- * another is between UNIX_HASH_SIZE and UNIX_HASH_SIZE * 2.
- */
+ if (hash1 == hash2) {
+ spin_lock(&net->unx.table.locks[hash1]);
+ return;
+ }
+
if (hash1 > hash2)
swap(hash1, hash2);
- spin_lock(&unix_table_locks[hash1]);
- spin_lock_nested(&unix_table_locks[hash2], SINGLE_DEPTH_NESTING);
+ spin_lock(&net->unx.table.locks[hash1]);
+ spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
}
-static void unix_table_double_unlock(unsigned int hash1, unsigned int hash2)
+static void unix_table_double_unlock(struct net *net,
+ unsigned int hash1, unsigned int hash2)
{
- spin_unlock(&unix_table_locks[hash1]);
- spin_unlock(&unix_table_locks[hash2]);
+ if (hash1 == hash2) {
+ spin_unlock(&net->unx.table.locks[hash1]);
+ return;
+ }
+
+ spin_unlock(&net->unx.table.locks[hash1]);
+ spin_unlock(&net->unx.table.locks[hash2]);
}
#ifdef CONFIG_SECURITY_NETWORK
@@ -300,34 +306,52 @@ static void __unix_remove_socket(struct sock *sk)
sk_del_node_init(sk);
}
-static void __unix_insert_socket(struct sock *sk)
+static void __unix_insert_socket(struct net *net, struct sock *sk)
{
- WARN_ON(!sk_unhashed(sk));
- sk_add_node(sk, &unix_socket_table[sk->sk_hash]);
+ DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
+ sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}
-static void __unix_set_addr_hash(struct sock *sk, struct unix_address *addr,
- unsigned int hash)
+static void __unix_set_addr_hash(struct net *net, struct sock *sk,
+ struct unix_address *addr, unsigned int hash)
{
__unix_remove_socket(sk);
smp_store_release(&unix_sk(sk)->addr, addr);
sk->sk_hash = hash;
- __unix_insert_socket(sk);
+ __unix_insert_socket(net, sk);
}
-static void unix_remove_socket(struct sock *sk)
+static void unix_remove_socket(struct net *net, struct sock *sk)
{
- spin_lock(&unix_table_locks[sk->sk_hash]);
+ spin_lock(&net->unx.table.locks[sk->sk_hash]);
__unix_remove_socket(sk);
- spin_unlock(&unix_table_locks[sk->sk_hash]);
+ spin_unlock(&net->unx.table.locks[sk->sk_hash]);
+}
+
+static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
+{
+ spin_lock(&net->unx.table.locks[sk->sk_hash]);
+ __unix_insert_socket(net, sk);
+ spin_unlock(&net->unx.table.locks[sk->sk_hash]);
+}
+
+static void unix_insert_bsd_socket(struct sock *sk)
+{
+ spin_lock(&bsd_socket_locks[sk->sk_hash]);
+ sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
+ spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}
-static void unix_insert_unbound_socket(struct sock *sk)
+static void unix_remove_bsd_socket(struct sock *sk)
{
- spin_lock(&unix_table_locks[sk->sk_hash]);
- __unix_insert_socket(sk);
- spin_unlock(&unix_table_locks[sk->sk_hash]);
+ if (!hlist_unhashed(&sk->sk_bind_node)) {
+ spin_lock(&bsd_socket_locks[sk->sk_hash]);
+ __sk_del_bind_node(sk);
+ spin_unlock(&bsd_socket_locks[sk->sk_hash]);
+
+ sk_node_init(&sk->sk_bind_node);
+ }
}
static struct sock *__unix_find_socket_byname(struct net *net,
@@ -336,12 +360,9 @@ static struct sock *__unix_find_socket_byname(struct net *net,
{
struct sock *s;
- sk_for_each(s, &unix_socket_table[hash]) {
+ sk_for_each(s, &net->unx.table.buckets[hash]) {
struct unix_sock *u = unix_sk(s);
- if (!net_eq(sock_net(s), net))
- continue;
-
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
return s;
@@ -355,11 +376,11 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
{
struct sock *s;
- spin_lock(&unix_table_locks[hash]);
+ spin_lock(&net->unx.table.locks[hash]);
s = __unix_find_socket_byname(net, sunname, len, hash);
if (s)
sock_hold(s);
- spin_unlock(&unix_table_locks[hash]);
+ spin_unlock(&net->unx.table.locks[hash]);
return s;
}
@@ -368,17 +389,17 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
unsigned int hash = unix_bsd_hash(i);
struct sock *s;
- spin_lock(&unix_table_locks[hash]);
- sk_for_each(s, &unix_socket_table[hash]) {
+ spin_lock(&bsd_socket_locks[hash]);
+ sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
if (dentry && d_backing_inode(dentry) == i) {
sock_hold(s);
- spin_unlock(&unix_table_locks[hash]);
+ spin_unlock(&bsd_socket_locks[hash]);
return s;
}
}
- spin_unlock(&unix_table_locks[hash]);
+ spin_unlock(&bsd_socket_locks[hash]);
return NULL;
}
@@ -554,9 +575,9 @@ static void unix_sock_destructor(struct sock *sk)
u->oob_skb = NULL;
}
#endif
- WARN_ON(refcount_read(&sk->sk_wmem_alloc));
- WARN_ON(!sk_unhashed(sk));
- WARN_ON(sk->sk_socket);
+ DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
+ DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
+ DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_info("Attempt to release alive unix socket: %p\n", sk);
return;
@@ -576,12 +597,13 @@ static void unix_sock_destructor(struct sock *sk)
static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
- struct path path;
struct sock *skpair;
struct sk_buff *skb;
+ struct path path;
int state;
- unix_remove_socket(sk);
+ unix_remove_socket(sock_net(sk), sk);
+ unix_remove_bsd_socket(sk);
/* Clear state */
unix_state_lock(sk);
@@ -741,10 +763,8 @@ static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
-static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor);
-static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor);
+static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
@@ -798,7 +818,7 @@ static const struct proto_ops unix_stream_ops = {
.shutdown = unix_shutdown,
.sendmsg = unix_stream_sendmsg,
.recvmsg = unix_stream_recvmsg,
- .read_sock = unix_stream_read_sock,
+ .read_skb = unix_stream_read_skb,
.mmap = sock_no_mmap,
.sendpage = unix_stream_sendpage,
.splice_read = unix_stream_splice_read,
@@ -823,7 +843,7 @@ static const struct proto_ops unix_dgram_ops = {
.listen = sock_no_listen,
.shutdown = unix_shutdown,
.sendmsg = unix_dgram_sendmsg,
- .read_sock = unix_read_sock,
+ .read_skb = unix_read_skb,
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
@@ -930,9 +950,9 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
init_waitqueue_head(&u->peer_wait);
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
memset(&u->scm_stat, 0, sizeof(struct scm_stat));
- unix_insert_unbound_socket(sk);
+ unix_insert_unbound_socket(net, sk);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
return sk;
@@ -993,8 +1013,8 @@ static int unix_release(struct socket *sock)
return 0;
}
-static struct sock *unix_find_bsd(struct net *net, struct sockaddr_un *sunaddr,
- int addr_len, int type)
+static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
+ int type)
{
struct inode *inode;
struct path path;
@@ -1063,7 +1083,7 @@ static struct sock *unix_find_other(struct net *net,
struct sock *sk;
if (sunaddr->sun_path[0])
- sk = unix_find_bsd(net, sunaddr, addr_len, type);
+ sk = unix_find_bsd(sunaddr, addr_len, type);
else
sk = unix_find_abstract(net, sunaddr, addr_len, type);
@@ -1074,6 +1094,7 @@ static int unix_autobind(struct sock *sk)
{
unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ struct net *net = sock_net(sk);
struct unix_address *addr;
u32 lastnum, ordernum;
int err;
@@ -1102,11 +1123,10 @@ retry:
sprintf(addr->name->sun_path + 1, "%05x", ordernum);
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
- unix_table_double_lock(old_hash, new_hash);
+ unix_table_double_lock(net, old_hash, new_hash);
- if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
- new_hash)) {
- unix_table_double_unlock(old_hash, new_hash);
+ if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
+ unix_table_double_unlock(net, old_hash, new_hash);
/* __unix_find_socket_byname() may take a long time if many names
* are already in use.
@@ -1123,8 +1143,8 @@ retry:
goto retry;
}
- __unix_set_addr_hash(sk, addr, new_hash);
- unix_table_double_unlock(old_hash, new_hash);
+ __unix_set_addr_hash(net, sk, addr, new_hash);
+ unix_table_double_unlock(net, old_hash, new_hash);
err = 0;
out: mutex_unlock(&u->bindlock);
@@ -1138,6 +1158,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ struct net *net = sock_net(sk);
struct user_namespace *ns; // barf...
struct unix_address *addr;
struct dentry *dentry;
@@ -1178,11 +1199,12 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
goto out_unlock;
new_hash = unix_bsd_hash(d_backing_inode(dentry));
- unix_table_double_lock(old_hash, new_hash);
+ unix_table_double_lock(net, old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
u->path.dentry = dget(dentry);
- __unix_set_addr_hash(sk, addr, new_hash);
- unix_table_double_unlock(old_hash, new_hash);
+ __unix_set_addr_hash(net, sk, addr, new_hash);
+ unix_table_double_unlock(net, old_hash, new_hash);
+ unix_insert_bsd_socket(sk);
mutex_unlock(&u->bindlock);
done_path_create(&parent, dentry);
return 0;
@@ -1205,6 +1227,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
{
unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ struct net *net = sock_net(sk);
struct unix_address *addr;
int err;
@@ -1222,19 +1245,18 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
}
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
- unix_table_double_lock(old_hash, new_hash);
+ unix_table_double_lock(net, old_hash, new_hash);
- if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
- new_hash))
+ if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
goto out_spin;
- __unix_set_addr_hash(sk, addr, new_hash);
- unix_table_double_unlock(old_hash, new_hash);
+ __unix_set_addr_hash(net, sk, addr, new_hash);
+ unix_table_double_unlock(net, old_hash, new_hash);
mutex_unlock(&u->bindlock);
return 0;
out_spin:
- unix_table_double_unlock(old_hash, new_hash);
+ unix_table_double_unlock(net, old_hash, new_hash);
err = -EADDRINUSE;
out_mutex:
mutex_unlock(&u->bindlock);
@@ -1293,9 +1315,8 @@ static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
- struct sock *sk = sock->sk;
- struct net *net = sock_net(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
+ struct sock *sk = sock->sk;
struct sock *other;
int err;
@@ -1316,7 +1337,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
}
restart:
- other = unix_find_other(net, sunaddr, alen, sock->type);
+ other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
if (IS_ERR(other)) {
err = PTR_ERR(other);
goto out;
@@ -1404,15 +1425,13 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
- struct sock *sk = sock->sk;
- struct net *net = sock_net(sk);
+ struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
- struct sock *newsk = NULL;
- struct sock *other = NULL;
+ struct net *net = sock_net(sk);
struct sk_buff *skb = NULL;
- int st;
- int err;
long timeo;
+ int err;
+ int st;
err = unix_validate_addr(sunaddr, addr_len);
if (err)
@@ -1432,7 +1451,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
*/
/* create new sock for complete connection */
- newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
+ newsk = unix_create1(net, NULL, 0, sock->type);
if (IS_ERR(newsk)) {
err = PTR_ERR(newsk);
newsk = NULL;
@@ -1541,9 +1560,9 @@ restart:
*
* The contents of *(otheru->addr) and otheru->path
* are seen fully set up here, since we have found
- * otheru in hash under unix_table_locks. Insertion
- * into the hash chain we'd found it in had been done
- * in an earlier critical area protected by unix_table_locks,
+ * otheru in hash under its lock. Insertion into the
+ * hash chain we'd found it in had been done in an
+ * earlier critical area protected by the chain's lock,
* the same one where we'd set *(otheru->addr) contents,
* as well as otheru->path and otheru->addr itself.
*
@@ -1840,17 +1859,15 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
- struct sock *sk = sock->sk;
- struct net *net = sock_net(sk);
- struct unix_sock *u = unix_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
- struct sock *other = NULL;
- int err;
- struct sk_buff *skb;
- long timeo;
+ struct sock *sk = sock->sk, *other = NULL;
+ struct unix_sock *u = unix_sk(sk);
struct scm_cookie scm;
+ struct sk_buff *skb;
int data_len = 0;
int sk_locked;
+ long timeo;
+ int err;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
@@ -1917,7 +1934,7 @@ restart:
if (sunaddr == NULL)
goto out_free;
- other = unix_find_other(net, sunaddr, msg->msg_namelen,
+ other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
sk->sk_type);
if (IS_ERR(other)) {
err = PTR_ERR(other);
@@ -2487,8 +2504,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t si
return __unix_dgram_recvmsg(sk, msg, size, flags);
}
-static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor)
+static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
int copied = 0;
@@ -2503,7 +2519,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
if (!skb)
return err;
- used = recv_actor(desc, skb, 0, skb->len);
+ used = recv_actor(sk, skb);
if (used <= 0) {
if (!copied)
copied = used;
@@ -2514,8 +2530,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
}
kfree_skb(skb);
- if (!desc->count)
- break;
+ break;
}
return copied;
@@ -2650,13 +2665,12 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
}
#endif
-static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor)
+static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
if (unlikely(sk->sk_state != TCP_ESTABLISHED))
return -ENOTCONN;
- return unix_read_sock(sk, desc, recv_actor);
+ return unix_read_skb(sk, recv_actor);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
@@ -3226,12 +3240,11 @@ static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
unsigned long offset = get_offset(*pos);
unsigned long bucket = get_bucket(*pos);
- struct sock *sk;
unsigned long count = 0;
+ struct sock *sk;
- for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
- if (sock_net(sk) != seq_file_net(seq))
- continue;
+ for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
+ sk; sk = sk_next(sk)) {
if (++count == offset)
break;
}
@@ -3242,16 +3255,17 @@ static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
{
unsigned long bucket = get_bucket(*pos);
+ struct net *net = seq_file_net(seq);
struct sock *sk;
- while (bucket < ARRAY_SIZE(unix_socket_table)) {
- spin_lock(&unix_table_locks[bucket]);
+ while (bucket < UNIX_HASH_SIZE) {
+ spin_lock(&net->unx.table.locks[bucket]);
sk = unix_from_bucket(seq, pos);
if (sk)
return sk;
- spin_unlock(&unix_table_locks[bucket]);
+ spin_unlock(&net->unx.table.locks[bucket]);
*pos = set_bucket_offset(++bucket, 1);
}
@@ -3264,11 +3278,12 @@ static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
{
unsigned long bucket = get_bucket(*pos);
- for (sk = sk_next(sk); sk; sk = sk_next(sk))
- if (sock_net(sk) == seq_file_net(seq))
- return sk;
+ sk = sk_next(sk);
+ if (sk)
+ return sk;
- spin_unlock(&unix_table_locks[bucket]);
+
+ spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
*pos = set_bucket_offset(++bucket, 1);
@@ -3298,7 +3313,7 @@ static void unix_seq_stop(struct seq_file *seq, void *v)
struct sock *sk = v;
if (sk)
- spin_unlock(&unix_table_locks[sk->sk_hash]);
+ spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
}
static int unix_seq_show(struct seq_file *seq, void *v)
@@ -3323,7 +3338,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
- if (u->addr) { // under unix_table_locks here
+ if (u->addr) { // under a hash table lock here
int i, len;
seq_putc(seq, ' ');
@@ -3393,9 +3408,6 @@ static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
iter->batch[iter->end_sk++] = start_sk;
for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
- if (sock_net(sk) != seq_file_net(seq))
- continue;
-
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
iter->batch[iter->end_sk++] = sk;
@@ -3404,7 +3416,7 @@ static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
expected++;
}
- spin_unlock(&unix_table_locks[start_sk->sk_hash]);
+ spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
return expected;
}
@@ -3564,7 +3576,7 @@ static const struct net_proto_family unix_family_ops = {
static int __net_init unix_net_init(struct net *net)
{
- int error = -ENOMEM;
+ int i;
net->unx.sysctl_max_dgram_qlen = 10;
if (unix_sysctl_register(net))
@@ -3572,18 +3584,44 @@ static int __net_init unix_net_init(struct net *net)
#ifdef CONFIG_PROC_FS
if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
- sizeof(struct seq_net_private))) {
- unix_sysctl_unregister(net);
- goto out;
+ sizeof(struct seq_net_private)))
+ goto err_sysctl;
+#endif
+
+ net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
+ sizeof(spinlock_t), GFP_KERNEL);
+ if (!net->unx.table.locks)
+ goto err_proc;
+
+ net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
+ sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!net->unx.table.buckets)
+ goto free_locks;
+
+ for (i = 0; i < UNIX_HASH_SIZE; i++) {
+ spin_lock_init(&net->unx.table.locks[i]);
+ INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
}
+
+ return 0;
+
+free_locks:
+ kvfree(net->unx.table.locks);
+err_proc:
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("unix", net->proc_net);
+err_sysctl:
#endif
- error = 0;
+ unix_sysctl_unregister(net);
out:
- return error;
+ return -ENOMEM;
}
static void __net_exit unix_net_exit(struct net *net)
{
+ kvfree(net->unx.table.buckets);
+ kvfree(net->unx.table.locks);
unix_sysctl_unregister(net);
remove_proc_entry("unix", net->proc_net);
}
@@ -3671,8 +3709,10 @@ static int __init af_unix_init(void)
BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
- for (i = 0; i < 2 * UNIX_HASH_SIZE; i++)
- spin_lock_init(&unix_table_locks[i]);
+ for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
+ spin_lock_init(&bsd_socket_locks[i]);
+ INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
+ }
rc = proto_register(&unix_dgram_proto, 1);
if (rc != 0) {
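
unix_table_double_lock() above has to cope with two hashes that may now collide, since bound and unbound sockets share a single per-netns table. The standard way to avoid an ABBA deadlock is to take the lower-indexed lock first and to degenerate to a single lock when the indices are equal. A self-contained pthread sketch of that ordering rule follows (illustrative names; the locks are assumed to have been initialised with pthread_spin_init() elsewhere).

#include <pthread.h>

#define NBUCKETS 256

static pthread_spinlock_t bucket_lock[NBUCKETS];

/* Lock two hash buckets without risking an ABBA deadlock: if the hashes
 * are equal only one lock exists, otherwise always acquire the
 * lower-indexed lock first so every caller uses the same order.
 */
static void table_double_lock(unsigned int h1, unsigned int h2)
{
	if (h1 == h2) {
		pthread_spin_lock(&bucket_lock[h1]);
		return;
	}
	if (h1 > h2) {
		unsigned int tmp = h1;

		h1 = h2;
		h2 = tmp;
	}
	pthread_spin_lock(&bucket_lock[h1]);
	pthread_spin_lock(&bucket_lock[h2]);
}

static void table_double_unlock(unsigned int h1, unsigned int h2)
{
	if (h1 == h2) {
		pthread_spin_unlock(&bucket_lock[h1]);
		return;
	}
	pthread_spin_unlock(&bucket_lock[h1]);
	pthread_spin_unlock(&bucket_lock[h2]);
}
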
diff --git a/net/unix/diag.c b/net/unix/diag.c
index bb0b5ea1655f..105f522a89fe 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -13,7 +13,7 @@
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
- /* might or might not have unix_table_locks */
+ /* might or might not have a hash table lock */
struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr)
@@ -195,25 +195,21 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct unix_diag_req *req;
- int num, s_num, slot, s_slot;
struct net *net = sock_net(skb->sk);
+ int num, s_num, slot, s_slot;
+ struct unix_diag_req *req;
req = nlmsg_data(cb->nlh);
s_slot = cb->args[0];
num = s_num = cb->args[1];
- for (slot = s_slot;
- slot < ARRAY_SIZE(unix_socket_table);
- s_num = 0, slot++) {
+ for (slot = s_slot; slot < UNIX_HASH_SIZE; s_num = 0, slot++) {
struct sock *sk;
num = 0;
- spin_lock(&unix_table_locks[slot]);
- sk_for_each(sk, &unix_socket_table[slot]) {
- if (!net_eq(sock_net(sk), net))
- continue;
+ spin_lock(&net->unx.table.locks[slot]);
+ sk_for_each(sk, &net->unx.table.buckets[slot]) {
if (num < s_num)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
@@ -222,13 +218,13 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI) < 0) {
- spin_unlock(&unix_table_locks[slot]);
+ spin_unlock(&net->unx.table.locks[slot]);
goto done;
}
next:
num++;
}
- spin_unlock(&unix_table_locks[slot]);
+ spin_unlock(&net->unx.table.locks[slot]);
}
done:
cb->args[0] = slot;
@@ -237,20 +233,21 @@ done:
return skb->len;
}
-static struct sock *unix_lookup_by_ino(unsigned int ino)
+static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino)
{
struct sock *sk;
int i;
- for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
- spin_lock(&unix_table_locks[i]);
- sk_for_each(sk, &unix_socket_table[i])
+ for (i = 0; i < UNIX_HASH_SIZE; i++) {
+ spin_lock(&net->unx.table.locks[i]);
+ sk_for_each(sk, &net->unx.table.buckets[i]) {
if (ino == sock_i_ino(sk)) {
sock_hold(sk);
- spin_unlock(&unix_table_locks[i]);
+ spin_unlock(&net->unx.table.locks[i]);
return sk;
}
- spin_unlock(&unix_table_locks[i]);
+ }
+ spin_unlock(&net->unx.table.locks[i]);
}
return NULL;
}
@@ -259,21 +256,20 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
struct unix_diag_req *req)
{
- int err = -EINVAL;
- struct sock *sk;
- struct sk_buff *rep;
- unsigned int extra_len;
struct net *net = sock_net(in_skb->sk);
+ unsigned int extra_len;
+ struct sk_buff *rep;
+ struct sock *sk;
+ int err;
+ err = -EINVAL;
if (req->udiag_ino == 0)
goto out_nosk;
- sk = unix_lookup_by_ino(req->udiag_ino);
+ sk = unix_lookup_by_ino(net, req->udiag_ino);
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
- if (!net_eq(sock_net(sk), net))
- goto out;
err = sock_diag_check_cookie(sk, req->udiag_cookie);
if (err)
@@ -308,7 +304,6 @@ out_nosk:
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct unix_diag_req);
- struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
@@ -317,7 +312,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
struct netlink_dump_control c = {
.dump = unix_diag_dump,
};
- return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+ return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c);
} else
return unix_diag_get_exact(skb, h, nlmsg_data(h));
}
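
The per-netns table that diag and proc now walk is set up in unix_net_init() with kvmalloc_array() and torn down in reverse order on failure. A userspace approximation of that allocate-initialise-unwind shape is shown below, with plain malloc/calloc standing in for kvmalloc_array; all names are illustrative.

#include <pthread.h>
#include <stdlib.h>

#define HASH_SIZE 256

struct sock_list { struct sock_list *next; };  /* stand-in bucket head */

struct net_table {
	pthread_spinlock_t *locks;
	struct sock_list  **buckets;
};

/* Allocate the lock and bucket arrays, initialise every slot, and
 * unwind with gotos in reverse order if any step fails.
 */
static int net_table_init(struct net_table *t)
{
	int i;

	t->locks = malloc(HASH_SIZE * sizeof(*t->locks));
	if (!t->locks)
		goto err;

	t->buckets = calloc(HASH_SIZE, sizeof(*t->buckets));
	if (!t->buckets)
		goto free_locks;

	for (i = 0; i < HASH_SIZE; i++)
		pthread_spin_init(&t->locks[i], PTHREAD_PROCESS_PRIVATE);

	return 0;

free_locks:
	free(t->locks);
err:
	return -1;
}
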
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 01d44e2598e2..500129aa710c 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -26,11 +26,16 @@ int __net_init unix_sysctl_register(struct net *net)
{
struct ctl_table *table;
- table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
- if (table == NULL)
- goto err_alloc;
+ if (net_eq(net, &init_net)) {
+ table = unix_table;
+ } else {
+ table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
+ if (!table)
+ goto err_alloc;
+
+ table[0].data = &net->unx.sysctl_max_dgram_qlen;
+ }
- table[0].data = &net->unx.sysctl_max_dgram_qlen;
net->unx.ctl = register_net_sysctl(net, "net/unix", table);
if (net->unx.ctl == NULL)
goto err_reg;
@@ -38,7 +43,8 @@ int __net_init unix_sysctl_register(struct net *net)
return 0;
err_reg:
- kfree(table);
+ if (!net_eq(net, &init_net))
+ kfree(table);
err_alloc:
return -ENOMEM;
}
@@ -49,5 +55,6 @@ void unix_sysctl_unregister(struct net *net)
table = net->unx.ctl->ctl_table_arg;
unregister_net_sysctl_table(net->unx.ctl);
- kfree(table);
+ if (!net_eq(net, &init_net))
+ kfree(table);
}
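
The sysctl change keeps a single static table for the initial namespace and only kmemdup()s it for child namespaces, so the unregister path must free conditionally. The shape is roughly as follows, sketched with libc calls instead of the sysctl API; every name here is made up for illustration.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct ctl_entry { const char *name; int *data; };

static int init_max_dgram_qlen = 10;
static struct ctl_entry default_table[] = {
	{ "max_dgram_qlen", &init_max_dgram_qlen },
};

/* The initial namespace uses the static table directly; other
 * namespaces get a private copy whose data pointer is rewired to
 * their own field. The free path mirrors that decision.
 */
static struct ctl_entry *register_table(bool is_init_ns, int *ns_qlen)
{
	struct ctl_entry *table;

	if (is_init_ns)
		return default_table;

	table = malloc(sizeof(default_table));
	if (!table)
		return NULL;
	memcpy(table, default_table, sizeof(default_table));
	table[0].data = ns_qlen;  /* point at the per-namespace value */
	return table;
}

static void unregister_table(bool is_init_ns, struct ctl_entry *table)
{
	if (!is_init_ns)
		free(table);      /* never free the shared static copy */
}
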
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 550ac9d827fe..e68923200018 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -1,4 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Parts of this file are
+ * Copyright (C) 2022 Intel Corporation
+ */
#include <linux/ieee80211.h>
#include <linux/export.h>
#include <net/cfg80211.h>
@@ -7,8 +11,9 @@
#include "rdev-ops.h"
-int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
- struct net_device *dev, bool notify)
+static int ___cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, unsigned int link_id,
+ bool notify)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
@@ -22,15 +27,16 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
- if (!wdev->beacon_interval)
+ if (!wdev->links[link_id].ap.beacon_interval)
return -ENOENT;
- err = rdev_stop_ap(rdev, dev);
+ err = rdev_stop_ap(rdev, dev, link_id);
if (!err) {
wdev->conn_owner_nlportid = 0;
- wdev->beacon_interval = 0;
- memset(&wdev->chandef, 0, sizeof(wdev->chandef));
- wdev->ssid_len = 0;
+ wdev->links[link_id].ap.beacon_interval = 0;
+ memset(&wdev->links[link_id].ap.chandef, 0,
+ sizeof(wdev->links[link_id].ap.chandef));
+ wdev->u.ap.ssid_len = 0;
rdev_set_qos_map(rdev, dev, NULL);
if (notify)
nl80211_send_ap_stopped(wdev);
@@ -46,14 +52,36 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
return err;
}
+int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, int link_id,
+ bool notify)
+{
+ unsigned int link;
+ int ret = 0;
+
+ if (link_id >= 0)
+ return ___cfg80211_stop_ap(rdev, dev, link_id, notify);
+
+ for_each_valid_link(dev->ieee80211_ptr, link) {
+ int ret1 = ___cfg80211_stop_ap(rdev, dev, link, notify);
+
+ if (ret1)
+ ret = ret1;
+ /* try the next one also if one errored */
+ }
+
+ return ret;
+}
+
int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
- struct net_device *dev, bool notify)
+ struct net_device *dev, int link_id,
+ bool notify)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
- err = __cfg80211_stop_ap(rdev, dev, notify);
+ err = __cfg80211_stop_ap(rdev, dev, link_id, notify);
wdev_unlock(wdev);
return err;
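
__cfg80211_stop_ap() now accepts link_id == -1 to mean "all links" and walks the valid_links bitmap, attempting every link even when one of them fails. A standalone sketch of that dispatch is below; the names are illustrative, and the real for_each_valid_link() also covers the non-MLO case where valid_links is zero, which this sketch ignores.

#include <stdio.h>

#define MAX_LINKS 16

static int stop_one_link(unsigned int link_id)
{
	printf("stopping AP on link %u\n", link_id);
	return 0;
}

/* A non-negative link_id targets one link; -1 walks every link whose
 * bit is set in valid_links, remembering the last error but trying
 * all of them.
 */
static int stop_ap(unsigned int valid_links, int link_id)
{
	unsigned int link;
	int ret = 0;

	if (link_id >= 0)
		return stop_one_link(link_id);

	for (link = 0; link < MAX_LINKS; link++) {
		int ret1;

		if (!(valid_links & (1U << link)))
			continue;

		ret1 = stop_one_link(link);
		if (ret1)
			ret = ret1;  /* keep going even if one link fails */
	}
	return ret;
}
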
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index f74f176e0d9d..0e5835cd8c61 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -672,14 +672,21 @@ bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
* range of chandef.
*/
bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
- struct ieee80211_channel *chan)
+ struct ieee80211_channel *chan,
+ bool primary_only)
{
int width;
u32 freq;
+ if (!chandef->chan)
+ return false;
+
if (chandef->chan->center_freq == chan->center_freq)
return true;
+ if (primary_only)
+ return false;
+
width = cfg80211_chandef_get_width(chandef);
if (width <= 20)
return false;
@@ -704,23 +711,25 @@ bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev)
{
- bool active = false;
+ unsigned int link;
ASSERT_WDEV_LOCK(wdev);
- if (!wdev->chandef.chan)
- return false;
-
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- active = wdev->beacon_interval != 0;
+ for_each_valid_link(wdev, link) {
+ if (wdev->links[link].ap.beacon_interval)
+ return true;
+ }
break;
case NL80211_IFTYPE_ADHOC:
- active = wdev->ssid_len != 0;
+ if (wdev->u.ibss.ssid_len)
+ return true;
break;
case NL80211_IFTYPE_MESH_POINT:
- active = wdev->mesh_id_len != 0;
+ if (wdev->u.mesh.id_len)
+ return true;
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_OCB:
@@ -737,7 +746,35 @@ bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev)
WARN_ON(1);
}
- return active;
+ return false;
+}
+
+bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ bool primary_only)
+{
+ unsigned int link;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ for_each_valid_link(wdev, link) {
+ if (cfg80211_is_sub_chan(&wdev->links[link].ap.chandef,
+ chan, primary_only))
+ return true;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_is_sub_chan(&wdev->u.ibss.chandef, chan,
+ primary_only);
+ case NL80211_IFTYPE_MESH_POINT:
+ return cfg80211_is_sub_chan(&wdev->u.mesh.chandef, chan,
+ primary_only);
+ default:
+ break;
+ }
+
+ return false;
}
static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy,
@@ -752,7 +789,7 @@ static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy,
continue;
}
- if (cfg80211_is_sub_chan(&wdev->chandef, chan)) {
+ if (cfg80211_wdev_on_sub_chan(wdev, chan, false)) {
wdev_unlock(wdev);
return true;
}
@@ -772,7 +809,8 @@ cfg80211_offchan_chain_is_active(struct cfg80211_registered_device *rdev,
if (!cfg80211_chandef_valid(&rdev->background_radar_chandef))
return false;
- return cfg80211_is_sub_chan(&rdev->background_radar_chandef, channel);
+ return cfg80211_is_sub_chan(&rdev->background_radar_chandef, channel,
+ false);
}
bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
@@ -1176,6 +1214,68 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_chandef_usable);
+static bool cfg80211_ir_permissive_check_wdev(enum nl80211_iftype iftype,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan)
+{
+ struct ieee80211_channel *other_chan = NULL;
+ unsigned int link_id;
+ int r1, r2;
+
+ for_each_valid_link(wdev, link_id) {
+ if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ wdev->links[link_id].client.current_bss)
+ other_chan = wdev->links[link_id].client.current_bss->pub.channel;
+
+ /*
+ * If a GO already operates on the same GO_CONCURRENT channel,
+ * this one (maybe the same one) can beacon as well. We allow
+ * the operation even if the station we relied on with
+ * GO_CONCURRENT is disconnected now. But then we must make sure
+ * we're not outdoor on an indoor-only channel.
+ */
+ if (iftype == NL80211_IFTYPE_P2P_GO &&
+ wdev->iftype == NL80211_IFTYPE_P2P_GO &&
+ wdev->links[link_id].ap.beacon_interval &&
+ !(chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
+ other_chan = wdev->links[link_id].ap.chandef.chan;
+
+ if (!other_chan)
+ continue;
+
+ if (chan == other_chan)
+ return true;
+
+ if (chan->band != NL80211_BAND_5GHZ &&
+ chan->band != NL80211_BAND_6GHZ)
+ continue;
+
+ r1 = cfg80211_get_unii(chan->center_freq);
+ r2 = cfg80211_get_unii(other_chan->center_freq);
+
+ if (r1 != -EINVAL && r1 == r2) {
+ /*
+ * At some locations channels 149-165 are considered a
+ * bundle, but at other locations, e.g., Indonesia,
+ * channels 149-161 are considered a bundle while
+ * channel 165 is left out and considered to be in a
+ * different bundle. Thus, in case that there is a
+ * station interface connected to an AP on channel 165,
+ * it is assumed that channels 149-161 are allowed for
+ * GO operations. However, having a station interface
+ * connected to an AP on channels 149-161, does not
+ * allow GO operation on channel 165.
+ */
+ if (chan->center_freq == 5825 &&
+ other_chan->center_freq != 5825)
+ continue;
+ return true;
+ }
+ }
+
+ return false;
+}
+
/*
* Check if the channel can be used under permissive conditions mandated by
* some regulatory bodies, i.e., the channel is marked with
@@ -1219,59 +1319,14 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
* the current registered device.
*/
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
- struct ieee80211_channel *other_chan = NULL;
- int r1, r2;
+ bool ret;
wdev_lock(wdev);
- if (wdev->iftype == NL80211_IFTYPE_STATION &&
- wdev->current_bss)
- other_chan = wdev->current_bss->pub.channel;
-
- /*
- * If a GO already operates on the same GO_CONCURRENT channel,
- * this one (maybe the same one) can beacon as well. We allow
- * the operation even if the station we relied on with
- * GO_CONCURRENT is disconnected now. But then we must make sure
- * we're not outdoor on an indoor-only channel.
- */
- if (iftype == NL80211_IFTYPE_P2P_GO &&
- wdev->iftype == NL80211_IFTYPE_P2P_GO &&
- wdev->beacon_interval &&
- !(chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
- other_chan = wdev->chandef.chan;
+ ret = cfg80211_ir_permissive_check_wdev(iftype, wdev, chan);
wdev_unlock(wdev);
- if (!other_chan)
- continue;
-
- if (chan == other_chan)
- return true;
-
- if (chan->band != NL80211_BAND_5GHZ &&
- chan->band != NL80211_BAND_6GHZ)
- continue;
-
- r1 = cfg80211_get_unii(chan->center_freq);
- r2 = cfg80211_get_unii(other_chan->center_freq);
-
- if (r1 != -EINVAL && r1 == r2) {
- /*
- * At some locations channels 149-165 are considered a
- * bundle, but at other locations, e.g., Indonesia,
- * channels 149-161 are considered a bundle while
- * channel 165 is left out and considered to be in a
- * different bundle. Thus, in case that there is a
- * station interface connected to an AP on channel 165,
- * it is assumed that channels 149-161 are allowed for
- * GO operations. However, having a station interface
- * connected to an AP on channels 149-161, does not
- * allow GO operation on channel 165.
- */
- if (chan->center_freq == 5825 &&
- other_chan->center_freq != 5825)
- continue;
- return true;
- }
+ if (ret)
+ return ret;
}
return false;
@@ -1374,3 +1429,34 @@ bool cfg80211_any_usable_channels(struct wiphy *wiphy,
return false;
}
EXPORT_SYMBOL(cfg80211_any_usable_channels);
+
+struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ /*
+ * We need to sort out the locking here - in some cases
+ * where we get here we really just don't care (yet)
+ * about the valid links, but in others we do. But we
+ * get here with various driver cases, so we cannot
+ * easily require the wdev mutex.
+ */
+ if (link_id || wdev->valid_links & BIT(0)) {
+ ASSERT_WDEV_LOCK(wdev);
+ WARN_ON(!(wdev->valid_links & BIT(link_id)));
+ }
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_MESH_POINT:
+ return &wdev->u.mesh.chandef;
+ case NL80211_IFTYPE_ADHOC:
+ return &wdev->u.ibss.chandef;
+ case NL80211_IFTYPE_OCB:
+ return &wdev->u.ocb.chandef;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ return &wdev->links[link_id].ap.chandef;
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(wdev_chandef);
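
cfg80211_is_sub_chan() gains a primary_only flag so callers can ask only about the primary channel. The following is a rough sketch of what such a containment test looks like; it is an assumption about the helper's internals, which also handle 80+80 MHz and other cases not visible in this hunk.

#include <stdbool.h>

/* Rough shape of a "sub-channel" test: with primary_only the caller
 * asks only whether 'freq' is the primary channel, otherwise any
 * channel inside the operating width counts. Frequencies in MHz.
 */
static bool is_sub_chan(int center_freq, int width_mhz, int primary_freq,
			int freq, bool primary_only)
{
	if (freq == primary_freq)
		return true;
	if (primary_only)
		return false;
	if (width_mhz <= 20)
		return false;

	return freq >= center_freq - width_mhz / 2 &&
	       freq <= center_freq + width_mhz / 2;
}
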
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f08d4b3bb148..6b5321bb1176 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -913,6 +913,12 @@ int wiphy_register(struct wiphy *wiphy)
return -EINVAL;
#endif
+ if (!wiphy->max_num_akm_suites)
+ wiphy->max_num_akm_suites = NL80211_MAX_NR_AKM_SUITES;
+ else if (wiphy->max_num_akm_suites < NL80211_MAX_NR_AKM_SUITES ||
+ wiphy->max_num_akm_suites > CFG80211_MAX_NUM_AKM_SUITES)
+ return -EINVAL;
+
/* check and set up bitrates */
ieee80211_set_bitrate_flags(wiphy);
@@ -1118,6 +1124,7 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
bool unregister_netdev)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ unsigned int link_id;
ASSERT_RTNL();
lockdep_assert_held(&rdev->wiphy.mtx);
@@ -1167,11 +1174,22 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
*/
cfg80211_process_wdev_events(wdev);
- if (WARN_ON(wdev->current_bss)) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
- wdev->current_bss = NULL;
+ if (wdev->iftype == NL80211_IFTYPE_STATION ||
+ wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ for (link_id = 0; link_id < ARRAY_SIZE(wdev->links); link_id++) {
+ struct cfg80211_internal_bss *curbss;
+
+ curbss = wdev->links[link_id].client.current_bss;
+
+ if (WARN_ON(curbss)) {
+ cfg80211_unhold_bss(curbss);
+ cfg80211_put_bss(wdev->wiphy, &curbss->pub);
+ wdev->links[link_id].client.current_bss = NULL;
+ }
+ }
}
+
+ wdev->connected = false;
}
void cfg80211_unregister_wdev(struct wireless_dev *wdev)
@@ -1233,7 +1251,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- __cfg80211_stop_ap(rdev, dev, true);
+ __cfg80211_stop_ap(rdev, dev, -1, true);
break;
case NL80211_IFTYPE_OCB:
__cfg80211_leave_ocb(rdev, dev);
@@ -1463,9 +1481,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
memcpy(&setup, &default_mesh_setup,
sizeof(setup));
/* back compat only needed for mesh_id */
- setup.mesh_id = wdev->ssid;
- setup.mesh_id_len = wdev->mesh_id_up_len;
- if (wdev->mesh_id_up_len)
+ setup.mesh_id = wdev->u.mesh.id;
+ setup.mesh_id_len = wdev->u.mesh.id_up_len;
+ if (wdev->u.mesh.id_up_len)
__cfg80211_join_mesh(rdev, dev,
&setup,
&default_mesh_config);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5436ada91b1a..fd723fa5e2d7 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -307,6 +307,7 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *rdev);
void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
unsigned long age_secs);
void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ unsigned int link,
struct ieee80211_channel *channel);
/* IBSS */
@@ -353,25 +354,18 @@ int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
/* AP */
int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
- struct net_device *dev, bool notify);
+ struct net_device *dev, int link,
+ bool notify);
int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
- struct net_device *dev, bool notify);
+ struct net_device *dev, int link,
+ bool notify);
/* MLME */
int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_auth_type auth_type,
- const u8 *bssid,
- const u8 *ssid, int ssid_len,
- const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx,
- const u8 *auth_data, int auth_data_len);
+ struct cfg80211_auth_request *req);
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct ieee80211_channel *chan,
- const u8 *bssid,
- const u8 *ssid, int ssid_len,
struct cfg80211_assoc_request *req);
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
@@ -507,7 +501,11 @@ bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev);
bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
- struct ieee80211_channel *chan);
+ struct ieee80211_channel *chan,
+ bool primary_only);
+bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ bool primary_only);
static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
{
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 5d89eec2869a..4935f94d1acc 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -28,7 +28,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return;
- if (!wdev->ssid_len)
+ if (!wdev->u.ibss.ssid_len)
return;
bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
@@ -37,13 +37,13 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
if (WARN_ON(!bss))
return;
- if (wdev->current_bss) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
+ if (wdev->u.ibss.current_bss) {
+ cfg80211_unhold_bss(wdev->u.ibss.current_bss);
+ cfg80211_put_bss(wdev->wiphy, &wdev->u.ibss.current_bss->pub);
}
cfg80211_hold_bss(bss_from_pub(bss));
- wdev->current_bss = bss_from_pub(bss);
+ wdev->u.ibss.current_bss = bss_from_pub(bss);
if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
cfg80211_upload_connect_keys(wdev);
@@ -96,7 +96,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
lockdep_assert_held(&rdev->wiphy.mtx);
ASSERT_WDEV_LOCK(wdev);
- if (wdev->ssid_len)
+ if (wdev->u.ibss.ssid_len)
return -EALREADY;
if (!params->basic_rates) {
@@ -131,7 +131,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
kfree_sensitive(wdev->connect_keys);
wdev->connect_keys = connkeys;
- wdev->chandef = params->chandef;
+ wdev->u.ibss.chandef = params->chandef;
if (connkeys) {
params->wep_keys = connkeys->params;
params->wep_tx_key = connkeys->def;
@@ -146,8 +146,8 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
return err;
}
- memcpy(wdev->ssid, params->ssid, params->ssid_len);
- wdev->ssid_len = params->ssid_len;
+ memcpy(wdev->u.ibss.ssid, params->ssid, params->ssid_len);
+ wdev->u.ibss.ssid_len = params->ssid_len;
return 0;
}
@@ -173,14 +173,14 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
for (i = 0; i < 6; i++)
rdev_del_key(rdev, dev, i, false, NULL);
- if (wdev->current_bss) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
+ if (wdev->u.ibss.current_bss) {
+ cfg80211_unhold_bss(wdev->u.ibss.current_bss);
+ cfg80211_put_bss(wdev->wiphy, &wdev->u.ibss.current_bss->pub);
}
- wdev->current_bss = NULL;
- wdev->ssid_len = 0;
- memset(&wdev->chandef, 0, sizeof(wdev->chandef));
+ wdev->u.ibss.current_bss = NULL;
+ wdev->u.ibss.ssid_len = 0;
+ memset(&wdev->u.ibss.chandef, 0, sizeof(wdev->u.ibss.chandef));
#ifdef CONFIG_CFG80211_WEXT
if (!nowext)
wdev->wext.ibss.ssid_len = 0;
@@ -205,7 +205,7 @@ int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
- if (!wdev->ssid_len)
+ if (!wdev->u.ibss.ssid_len)
return -ENOLINK;
err = rdev_leave_ibss(rdev, dev);
@@ -339,7 +339,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
wdev_lock(wdev);
err = 0;
- if (wdev->ssid_len)
+ if (wdev->u.ibss.ssid_len)
err = __cfg80211_leave_ibss(rdev, dev, true);
wdev_unlock(wdev);
@@ -374,8 +374,8 @@ int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
return -EINVAL;
wdev_lock(wdev);
- if (wdev->current_bss)
- chan = wdev->current_bss->pub.channel;
+ if (wdev->u.ibss.current_bss)
+ chan = wdev->u.ibss.current_bss->pub.channel;
else if (wdev->wext.ibss.chandef.chan)
chan = wdev->wext.ibss.chandef.chan;
wdev_unlock(wdev);
@@ -408,7 +408,7 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
wdev_lock(wdev);
err = 0;
- if (wdev->ssid_len)
+ if (wdev->u.ibss.ssid_len)
err = __cfg80211_leave_ibss(rdev, dev, true);
wdev_unlock(wdev);
@@ -419,8 +419,8 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
if (len > 0 && ssid[len - 1] == '\0')
len--;
- memcpy(wdev->ssid, ssid, len);
- wdev->wext.ibss.ssid = wdev->ssid;
+ memcpy(wdev->u.ibss.ssid, ssid, len);
+ wdev->wext.ibss.ssid = wdev->u.ibss.ssid;
wdev->wext.ibss.ssid_len = len;
wdev_lock(wdev);
@@ -443,10 +443,10 @@ int cfg80211_ibss_wext_giwessid(struct net_device *dev,
data->flags = 0;
wdev_lock(wdev);
- if (wdev->ssid_len) {
+ if (wdev->u.ibss.ssid_len) {
data->flags = 1;
- data->length = wdev->ssid_len;
- memcpy(ssid, wdev->ssid, data->length);
+ data->length = wdev->u.ibss.ssid_len;
+ memcpy(ssid, wdev->u.ibss.ssid, data->length);
} else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) {
data->flags = 1;
data->length = wdev->wext.ibss.ssid_len;
@@ -494,7 +494,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
wdev_lock(wdev);
err = 0;
- if (wdev->ssid_len)
+ if (wdev->u.ibss.ssid_len)
err = __cfg80211_leave_ibss(rdev, dev, true);
wdev_unlock(wdev);
@@ -527,8 +527,9 @@ int cfg80211_ibss_wext_giwap(struct net_device *dev,
ap_addr->sa_family = ARPHRD_ETHER;
wdev_lock(wdev);
- if (wdev->current_bss)
- memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
+ if (wdev->u.ibss.current_bss)
+ memcpy(ap_addr->sa_data, wdev->u.ibss.current_bss->pub.bssid,
+ ETH_ALEN);
else if (wdev->wext.ibss.bssid)
memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
else
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index e4e363138279..59a3c5c092b1 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,4 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Portions
+ * Copyright (C) 2022 Intel Corporation
+ */
#include <linux/ieee80211.h>
#include <linux/export.h>
#include <net/cfg80211.h>
@@ -114,7 +118,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
setup->is_secure)
return -EOPNOTSUPP;
- if (wdev->mesh_id_len)
+ if (wdev->u.mesh.id_len)
return -EALREADY;
if (!setup->mesh_id_len)
@@ -125,7 +129,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!setup->chandef.chan) {
/* if no channel explicitly given, use preset channel */
- setup->chandef = wdev->preset_chandef;
+ setup->chandef = wdev->u.mesh.preset_chandef;
}
if (!setup->chandef.chan) {
@@ -209,10 +213,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
err = rdev_join_mesh(rdev, dev, conf, setup);
if (!err) {
- memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
- wdev->mesh_id_len = setup->mesh_id_len;
- wdev->chandef = setup->chandef;
- wdev->beacon_interval = setup->beacon_interval;
+ memcpy(wdev->u.mesh.id, setup->mesh_id, setup->mesh_id_len);
+ wdev->u.mesh.id_len = setup->mesh_id_len;
+ wdev->u.mesh.chandef = setup->chandef;
+ wdev->u.mesh.beacon_interval = setup->beacon_interval;
}
return err;
@@ -241,15 +245,15 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
chandef->chan);
if (!err)
- wdev->chandef = *chandef;
+ wdev->u.mesh.chandef = *chandef;
return err;
}
- if (wdev->mesh_id_len)
+ if (wdev->u.mesh.id_len)
return -EBUSY;
- wdev->preset_chandef = *chandef;
+ wdev->u.mesh.preset_chandef = *chandef;
return 0;
}
@@ -267,15 +271,16 @@ int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
if (!rdev->ops->leave_mesh)
return -EOPNOTSUPP;
- if (!wdev->mesh_id_len)
+ if (!wdev->u.mesh.id_len)
return -ENOTCONN;
err = rdev_leave_mesh(rdev, dev);
if (!err) {
wdev->conn_owner_nlportid = 0;
- wdev->mesh_id_len = 0;
- wdev->beacon_interval = 0;
- memset(&wdev->chandef, 0, sizeof(wdev->chandef));
+ wdev->u.mesh.id_len = 0;
+ wdev->u.mesh.beacon_interval = 0;
+ memset(&wdev->u.mesh.chandef, 0,
+ sizeof(wdev->u.mesh.chandef));
rdev_set_qos_map(rdev, dev, NULL);
cfg80211_sched_dfs_chan_update(rdev);
}
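
As with the IBSS changes above, the mesh state moves out of flat wireless_dev fields (ssid, mesh_id_len, chandef, beacon_interval) and into the per-interface-type union wdev->u.mesh. Purely as an illustration of that layout decision, a minimal stand-alone sketch — the struct and field names below are simplified stand-ins, not the real cfg80211 definitions — might look like:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-ins; the real definitions live in <net/cfg80211.h> */
enum iftype { IFTYPE_STATION, IFTYPE_ADHOC, IFTYPE_MESH_POINT, IFTYPE_AP };

struct toy_wdev {
	enum iftype iftype;
	/* one union member is valid at a time, selected by iftype,
	 * instead of separate flat ssid/mesh_id/chandef fields */
	union {
		struct { uint8_t ssid[32]; uint8_t ssid_len; } ibss;
		struct { uint8_t id[32];   uint8_t id_len;   } mesh;
		struct { uint8_t ssid[32]; uint8_t ssid_len; } ap;
		struct { uint8_t connected_addr[6]; uint8_t ssid_len; } client;
	} u;
};

static int toy_joined(const struct toy_wdev *wdev)
{
	/* the "am I up/joined?" check now depends on the interface type */
	switch (wdev->iftype) {
	case IFTYPE_ADHOC:      return wdev->u.ibss.ssid_len != 0;
	case IFTYPE_MESH_POINT: return wdev->u.mesh.id_len != 0;
	case IFTYPE_AP:         return wdev->u.ap.ssid_len != 0;
	default:                return 0;
	}
}

int main(void)
{
	struct toy_wdev wdev = { .iftype = IFTYPE_MESH_POINT };

	memcpy(wdev.u.mesh.id, "toy-mesh", 8);
	wdev.u.mesh.id_len = 8;
	printf("joined: %d\n", toy_joined(&wdev));
	return 0;
}
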
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index c8155a483ec2..14584488de67 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -42,8 +42,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
memset(&cr, 0, sizeof(cr));
cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
- cr.bssid = mgmt->bssid;
- cr.bss = bss;
+ cr.links[0].bssid = mgmt->bssid;
+ cr.links[0].bss = bss;
cr.req_ie = req_ies;
cr.req_ie_len = req_ies_len;
cr.resp_ie = resp_ie;
@@ -92,8 +92,7 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev,
nl80211_send_deauth(rdev, wdev->netdev, buf, len, reconnect, GFP_KERNEL);
- if (!wdev->current_bss ||
- !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
+ if (!wdev->connected || !ether_addr_equal(wdev->u.client.connected_addr, bssid))
return;
__cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap);
@@ -113,8 +112,8 @@ static void cfg80211_process_disassoc(struct wireless_dev *wdev,
nl80211_send_disassoc(rdev, wdev->netdev, buf, len, reconnect,
GFP_KERNEL);
- if (WARN_ON(!wdev->current_bss ||
- !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+ if (WARN_ON(!wdev->connected ||
+ !ether_addr_equal(wdev->u.client.connected_addr, bssid)))
return;
__cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap);
@@ -233,47 +232,30 @@ EXPORT_SYMBOL(cfg80211_michael_mic_failure);
/* some MLME handling for userspace SME */
int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_auth_type auth_type,
- const u8 *bssid,
- const u8 *ssid, int ssid_len,
- const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx,
- const u8 *auth_data, int auth_data_len)
+ struct cfg80211_auth_request *req)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_auth_request req = {
- .ie = ie,
- .ie_len = ie_len,
- .auth_data = auth_data,
- .auth_data_len = auth_data_len,
- .auth_type = auth_type,
- .key = key,
- .key_len = key_len,
- .key_idx = key_idx,
- };
- int err;
ASSERT_WDEV_LOCK(wdev);
- if (auth_type == NL80211_AUTHTYPE_SHARED_KEY)
- if (!key || !key_len || key_idx < 0 || key_idx > 3)
- return -EINVAL;
+ if (!req->bss)
+ return -ENOENT;
- if (wdev->current_bss &&
- ether_addr_equal(bssid, wdev->current_bss->pub.bssid))
- return -EALREADY;
+ if (req->link_id >= 0 &&
+ !(wdev->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
+ return -EINVAL;
- req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
- IEEE80211_BSS_TYPE_ESS,
- IEEE80211_PRIVACY_ANY);
- if (!req.bss)
- return -ENOENT;
+ if (req->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+ if (!req->key || !req->key_len ||
+ req->key_idx < 0 || req->key_idx > 3)
+ return -EINVAL;
+ }
- err = rdev_auth(rdev, dev, &req);
+ if (wdev->connected &&
+ ether_addr_equal(req->bss->bssid, wdev->u.client.connected_addr))
+ return -EALREADY;
- cfg80211_put_bss(&rdev->wiphy, req.bss);
- return err;
+ return rdev_auth(rdev, dev, req);
}
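
cfg80211_mlme_auth() now takes a caller-built cfg80211_auth_request and no longer looks up or releases the BSS itself; the nl80211 caller later in this patch does cfg80211_get_bss() before the call and cfg80211_put_bss() afterwards regardless of the result. A hedged sketch of that ownership rule, with toy types rather than the real cfg80211 API:

#include <stdio.h>
#include <stdlib.h>

/* toy stand-ins for a refcounted BSS entry and a request struct */
struct toy_bss { int refs; };
struct toy_auth_req { struct toy_bss *bss; int auth_type; };

static struct toy_bss *toy_get_bss(void)
{
	struct toy_bss *bss = calloc(1, sizeof(*bss));

	if (bss)
		bss->refs = 1;
	return bss;
}

static void toy_put_bss(struct toy_bss *bss)
{
	if (bss && --bss->refs == 0)
		free(bss);
}

/* callee: only validates and uses req->bss, never drops the reference */
static int toy_mlme_auth(const struct toy_auth_req *req)
{
	if (!req->bss)
		return -2;	/* -ENOENT in the real code */
	return 0;		/* driver call would go here */
}

int main(void)
{
	struct toy_auth_req req = { .auth_type = 0 };
	int err;

	req.bss = toy_get_bss();	/* caller takes the reference ... */
	err = toy_mlme_auth(&req);
	toy_put_bss(req.bss);		/* ... and drops it unconditionally */
	printf("err=%d\n", err);
	return 0;
}
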
/* Do a logical ht_capa &= ht_capa_mask. */
@@ -310,21 +292,28 @@ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
p1[i] &= p2[i];
}
+/* Note: caller must cfg80211_put_bss() regardless of result */
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct ieee80211_channel *chan,
- const u8 *bssid,
- const u8 *ssid, int ssid_len,
struct cfg80211_assoc_request *req)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
+ int err, i, j;
ASSERT_WDEV_LOCK(wdev);
- if (wdev->current_bss &&
- (!req->prev_bssid || !ether_addr_equal(wdev->current_bss->pub.bssid,
- req->prev_bssid)))
+ for (i = 1; i < ARRAY_SIZE(req->links); i++) {
+ if (!req->links[i].bss)
+ continue;
+ for (j = 0; j < i; j++) {
+ if (req->links[i].bss == req->links[j].bss)
+ return -EINVAL;
+ }
+ }
+
+ if (wdev->connected &&
+ (!req->prev_bssid ||
+ !ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
return -EALREADY;
cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
@@ -332,18 +321,22 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
cfg80211_oper_and_vht_capa(&req->vht_capa_mask,
rdev->wiphy.vht_capa_mod_mask);
- req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
- IEEE80211_BSS_TYPE_ESS,
- IEEE80211_PRIVACY_ANY);
- if (!req->bss)
- return -ENOENT;
-
err = rdev_assoc(rdev, dev, req);
- if (!err)
- cfg80211_hold_bss(bss_from_pub(req->bss));
- else
- cfg80211_put_bss(&rdev->wiphy, req->bss);
+ if (!err) {
+ int link_id;
+
+ if (req->bss) {
+ cfg80211_ref_bss(&rdev->wiphy, req->bss);
+ cfg80211_hold_bss(bss_from_pub(req->bss));
+ }
+ for (link_id = 0; link_id < ARRAY_SIZE(req->links); link_id++) {
+ if (!req->links[link_id].bss)
+ continue;
+ cfg80211_ref_bss(&rdev->wiphy, req->links[link_id].bss);
+ cfg80211_hold_bss(bss_from_pub(req->links[link_id].bss));
+ }
+ }
return err;
}
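
The new loop at the top of cfg80211_mlme_assoc() rejects requests in which two MLO links reference the same BSS entry. A self-contained model of that pairwise check, using toy types and a fixed link count purely for illustration:

#include <stdio.h>

#define TOY_MAX_LINKS 15

struct toy_link { const void *bss; };

/* returns 0 if no two populated links share a BSS pointer, -1 otherwise */
static int toy_check_unique_link_bss(const struct toy_link *links)
{
	int i, j;

	for (i = 1; i < TOY_MAX_LINKS; i++) {
		if (!links[i].bss)
			continue;
		for (j = 0; j < i; j++)
			if (links[i].bss == links[j].bss)
				return -1;	/* -EINVAL in the real code */
	}
	return 0;
}

int main(void)
{
	static const int a, b;
	struct toy_link links[TOY_MAX_LINKS] = {
		[0] = { &a }, [2] = { &b }, [5] = { &a },	/* link 5 reuses link 0's BSS */
	};

	printf("unique: %d\n", toy_check_unique_link_bss(links));
	return 0;
}
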
@@ -364,13 +357,13 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
if (local_state_change &&
- (!wdev->current_bss ||
- !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+ (!wdev->connected ||
+ !ether_addr_equal(wdev->u.client.connected_addr, bssid)))
return 0;
if (ether_addr_equal(wdev->disconnect_bssid, bssid) ||
- (wdev->current_bss &&
- ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+ (wdev->connected &&
+ ether_addr_equal(wdev->u.client.connected_addr, bssid)))
wdev->conn_owner_nlportid = 0;
return rdev_deauth(rdev, dev, &req);
@@ -392,11 +385,12 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
- if (!wdev->current_bss)
+ if (!wdev->connected)
return -ENOTCONN;
- if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
- req.bss = &wdev->current_bss->pub;
+ if (ether_addr_equal(wdev->links[0].client.current_bss->pub.bssid,
+ bssid))
+ req.bss = &wdev->links[0].client.current_bss->pub;
else
return -ENOTCONN;
@@ -405,7 +399,7 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
return err;
/* driver should have reported the disassoc */
- WARN_ON(wdev->current_bss);
+ WARN_ON(wdev->connected);
return 0;
}
@@ -420,10 +414,10 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
if (!rdev->ops->deauth)
return;
- if (!wdev->current_bss)
+ if (!wdev->connected)
return;
- memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
+ memcpy(bssid, wdev->u.client.connected_addr, ETH_ALEN);
cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
WLAN_REASON_DEAUTH_LEAVING, false);
}
@@ -676,28 +670,34 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
+ /*
+ * check for IBSS DA must be done by driver as
+ * cfg80211 doesn't track the stations
+ */
+ if (!wdev->u.ibss.current_bss ||
+ !ether_addr_equal(wdev->u.ibss.current_bss->pub.bssid,
+ mgmt->bssid)) {
+ err = -ENOTCONN;
+ break;
+ }
+ break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- if (!wdev->current_bss) {
+ if (!wdev->connected) {
err = -ENOTCONN;
break;
}
- if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+ /* FIXME: MLD may address this differently */
+
+ if (!ether_addr_equal(wdev->u.client.connected_addr,
mgmt->bssid)) {
err = -ENOTCONN;
break;
}
- /*
- * check for IBSS DA must be done by driver as
- * cfg80211 doesn't track the stations
- */
- if (wdev->iftype == NL80211_IFTYPE_ADHOC)
- break;
-
/* for station, check that DA is the AP */
- if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+ if (!ether_addr_equal(wdev->u.client.connected_addr,
mgmt->da)) {
err = -ENOTCONN;
break;
@@ -743,19 +743,19 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
return -EINVAL;
- if (!wdev->current_bss &&
+ if (!wdev->connected &&
!wiphy_ext_feature_isset(
&rdev->wiphy,
NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA))
return -EINVAL;
- if (wdev->current_bss &&
+ if (wdev->connected &&
!wiphy_ext_feature_isset(
&rdev->wiphy,
NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED))
return -EINVAL;
}
- /* Transmit the Action frame as requested by user space */
+ /* Transmit the management frame as requested by user space */
return rdev_mgmt_tx(rdev, wdev, params, cookie);
}
@@ -940,14 +940,15 @@ void cfg80211_cac_event(struct net_device *netdev,
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
unsigned long timeout;
+ /* not yet supported */
+ if (wdev->valid_links)
+ return;
+
trace_cfg80211_cac_event(netdev, event);
if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED))
return;
- if (WARN_ON(!wdev->chandef.chan))
- return;
-
switch (event) {
case NL80211_RADAR_CAC_FINISHED:
timeout = wdev->cac_start_time +
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 740b29481bc6..22c4cf6fbb57 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -792,6 +792,13 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
NL80211_EHT_MIN_CAPABILITY_LEN,
NL80211_EHT_MAX_CAPABILITY_LEN),
[NL80211_ATTR_DISABLE_EHT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_MLO_LINKS] =
+ NLA_POLICY_NESTED_ARRAY(nl80211_policy),
+ [NL80211_ATTR_MLO_LINK_ID] =
+ NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS),
+ [NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
+ [NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
};
/* policy for the key attributes */
@@ -1225,6 +1232,37 @@ static bool nl80211_put_txq_stats(struct sk_buff *msg,
/* netlink command implementations */
+/**
+ * nl80211_link_id - return link ID
+ * @attrs: attributes to look at
+ *
+ * Returns: the link ID or 0 if not given
+ *
+ * Note this function doesn't validate the link ID against the
+ * links that were actually added, so it must
+ * be called only from ops with %NL80211_FLAG_MLO_VALID_LINK_ID
+ * or if additional validation is done.
+ */
+static unsigned int nl80211_link_id(struct nlattr **attrs)
+{
+ struct nlattr *linkid = attrs[NL80211_ATTR_MLO_LINK_ID];
+
+ if (!linkid)
+ return 0;
+
+ return nla_get_u8(linkid);
+}
+
+static int nl80211_link_id_or_invalid(struct nlattr **attrs)
+{
+ struct nlattr *linkid = attrs[NL80211_ATTR_MLO_LINK_ID];
+
+ if (!linkid)
+ return -1;
+
+ return nla_get_u8(linkid);
+}
+
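
The two helpers differ only in what they return when NL80211_ATTR_MLO_LINK_ID is absent: nl80211_link_id() falls back to link 0 (the non-MLO case), while nl80211_link_id_or_invalid() returns -1 so callers such as the authenticate/associate paths can tell "no link given" apart from "link 0". A hedged stand-alone model of that split, with a toy attribute type rather than the real nlattr API:

#include <stdio.h>

/* toy stand-in for a parsed netlink attribute: presence flag + u8 value */
struct toy_attr { int present; unsigned char value; };

static unsigned int toy_link_id(const struct toy_attr *a)
{
	return a->present ? a->value : 0;	/* default to link 0 */
}

static int toy_link_id_or_invalid(const struct toy_attr *a)
{
	return a->present ? (int)a->value : -1;	/* explicitly "not given" */
}

int main(void)
{
	struct toy_attr absent = { 0, 0 };
	struct toy_attr link2 = { 1, 2 };

	printf("%u %d\n", toy_link_id(&absent), toy_link_id_or_invalid(&absent));
	printf("%u %d\n", toy_link_id(&link2), toy_link_id_or_invalid(&link2));
	return 0;
}
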
struct key_parse {
struct key_params p;
int idx;
@@ -1496,11 +1534,15 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
case NL80211_IFTYPE_MESH_POINT:
break;
case NL80211_IFTYPE_ADHOC:
+ if (wdev->u.ibss.current_bss)
+ return 0;
+ return -ENOLINK;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- if (!wdev->current_bss)
- return -ENOLINK;
- break;
+ /* for MLO, require driver validation of the link ID */
+ if (wdev->connected)
+ return 0;
+ return -ENOLINK;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_MONITOR:
@@ -2891,6 +2933,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
if (nl80211_put_mbssid_support(&rdev->wiphy, msg))
goto nla_put_failure;
+ if (nla_put_u16(msg, NL80211_ATTR_MAX_NUM_AKM_SUITES,
+ rdev->wiphy.max_num_akm_suites))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -3232,12 +3278,14 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct genl_info *info)
+ struct genl_info *info,
+ int _link_id)
{
struct cfg80211_chan_def chandef;
int result;
enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
struct wireless_dev *wdev = NULL;
+ int link_id = _link_id;
if (dev)
wdev = dev->ieee80211_ptr;
@@ -3246,6 +3294,12 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
if (wdev)
iftype = wdev->iftype;
+ if (link_id < 0) {
+ if (wdev && wdev->valid_links)
+ return -EINVAL;
+ link_id = 0;
+ }
+
result = nl80211_parse_chandef(rdev, info, &chandef);
if (result)
return result;
@@ -3254,49 +3308,48 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
- iftype)) {
- result = -EINVAL;
- break;
- }
- if (wdev->beacon_interval) {
+ iftype))
+ return -EINVAL;
+ if (wdev->links[link_id].ap.beacon_interval) {
+ struct ieee80211_channel *cur_chan;
+
if (!dev || !rdev->ops->set_ap_chanwidth ||
!(rdev->wiphy.features &
- NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE)) {
- result = -EBUSY;
- break;
- }
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE))
+ return -EBUSY;
/* Only allow dynamic channel width changes */
- if (chandef.chan != wdev->preset_chandef.chan) {
- result = -EBUSY;
- break;
- }
- result = rdev_set_ap_chanwidth(rdev, dev, &chandef);
+ cur_chan = wdev->links[link_id].ap.chandef.chan;
+ if (chandef.chan != cur_chan)
+ return -EBUSY;
+
+ result = rdev_set_ap_chanwidth(rdev, dev, link_id,
+ &chandef);
if (result)
- break;
+ return result;
+ wdev->links[link_id].ap.chandef = chandef;
+ } else {
+ wdev->u.ap.preset_chandef = chandef;
}
- wdev->preset_chandef = chandef;
- result = 0;
- break;
+ return 0;
case NL80211_IFTYPE_MESH_POINT:
- result = cfg80211_set_mesh_channel(rdev, wdev, &chandef);
- break;
+ return cfg80211_set_mesh_channel(rdev, wdev, &chandef);
case NL80211_IFTYPE_MONITOR:
- result = cfg80211_set_monitor_channel(rdev, &chandef);
- break;
+ return cfg80211_set_monitor_channel(rdev, &chandef);
default:
- result = -EINVAL;
+ break;
}
- return result;
+ return -EINVAL;
}
static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ int link_id = nl80211_link_id_or_invalid(info->attrs);
struct net_device *netdev = info->user_ptr[1];
- return __nl80211_set_channel(rdev, netdev, info);
+ return __nl80211_set_channel(rdev, netdev, info, link_id);
}
static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
@@ -3411,7 +3464,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
result = __nl80211_set_channel(
rdev,
nl80211_can_set_dev_channel(wdev) ? netdev : NULL,
- info);
+ info, -1);
if (result)
goto out;
}
@@ -3696,15 +3749,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr))
goto nla_put_failure;
- if (rdev->ops->get_channel) {
- int ret;
+ if (rdev->ops->get_channel && !wdev->valid_links) {
struct cfg80211_chan_def chandef = {};
+ int ret;
- ret = rdev_get_channel(rdev, wdev, &chandef);
- if (ret == 0) {
- if (nl80211_send_chandef(msg, &chandef))
- goto nla_put_failure;
- }
+ ret = rdev_get_channel(rdev, wdev, 0, &chandef);
+ if (ret == 0 && nl80211_send_chandef(msg, &chandef))
+ goto nla_put_failure;
}
if (rdev->ops->get_tx_power) {
@@ -3721,27 +3772,24 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- if (wdev->ssid_len &&
- nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
+ if (wdev->u.ap.ssid_len &&
+ nla_put(msg, NL80211_ATTR_SSID, wdev->u.ap.ssid_len,
+ wdev->u.ap.ssid))
goto nla_put_failure_locked;
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_ADHOC: {
- const struct element *ssid_elem;
-
- if (!wdev->current_bss)
- break;
- rcu_read_lock();
- ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub,
- WLAN_EID_SSID);
- if (ssid_elem &&
- nla_put(msg, NL80211_ATTR_SSID, ssid_elem->datalen,
- ssid_elem->data))
- goto nla_put_failure_rcu_locked;
- rcu_read_unlock();
+ if (wdev->u.client.ssid_len &&
+ nla_put(msg, NL80211_ATTR_SSID, wdev->u.client.ssid_len,
+ wdev->u.client.ssid))
+ goto nla_put_failure_locked;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (wdev->u.ibss.ssid_len &&
+ nla_put(msg, NL80211_ATTR_SSID, wdev->u.ibss.ssid_len,
+ wdev->u.ibss.ssid))
+ goto nla_put_failure_locked;
break;
- }
default:
/* nothing */
break;
@@ -3758,11 +3806,31 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
goto nla_put_failure;
}
+ if (wdev->valid_links) {
+ unsigned int link_id;
+ struct nlattr *links = nla_nest_start(msg,
+ NL80211_ATTR_MLO_LINKS);
+
+ if (!links)
+ goto nla_put_failure;
+
+ for_each_valid_link(wdev, link_id) {
+ struct nlattr *link = nla_nest_start(msg, link_id + 1);
+
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id))
+ goto nla_put_failure;
+ if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
+ wdev->links[link_id].addr))
+ goto nla_put_failure;
+ nla_nest_end(msg, link);
+ }
+
+ nla_nest_end(msg, links);
+ }
+
genlmsg_end(msg, hdr);
return 0;
- nla_put_failure_rcu_locked:
- rcu_read_unlock();
nla_put_failure_locked:
wdev_unlock(wdev);
nla_put_failure:
@@ -4014,10 +4082,11 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
wdev_lock(wdev);
BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
IEEE80211_MAX_MESH_ID_LEN);
- wdev->mesh_id_up_len =
+ wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
- memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
- wdev->mesh_id_up_len);
+ memcpy(wdev->u.mesh.id,
+ nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
+ wdev->u.mesh.id_up_len);
wdev_unlock(wdev);
}
@@ -4122,10 +4191,11 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
wdev_lock(wdev);
BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
IEEE80211_MAX_MESH_ID_LEN);
- wdev->mesh_id_up_len =
+ wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
- memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
- wdev->mesh_id_up_len);
+ memcpy(wdev->u.mesh.id,
+ nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
+ wdev->u.mesh.id_up_len);
wdev_unlock(wdev);
break;
case NL80211_IFTYPE_NAN:
@@ -4662,7 +4732,7 @@ static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
- if (!dev->ieee80211_ptr->beacon_interval)
+ if (!dev->ieee80211_ptr->links[0].ap.beacon_interval)
return -EINVAL;
acl = parse_acl_data(&rdev->wiphy, info);
@@ -4818,14 +4888,24 @@ static void he_build_mcs_mask(u16 he_mcs_map,
}
}
-static u16 he_get_txmcsmap(struct genl_info *info,
+static u16 he_get_txmcsmap(struct genl_info *info, unsigned int link_id,
const struct ieee80211_sta_he_cap *he_cap)
{
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
- __le16 tx_mcs;
+ struct cfg80211_chan_def *chandef;
+ __le16 tx_mcs;
+
+ chandef = wdev_chandef(wdev, link_id);
+ if (!chandef) {
+ /*
+ * This is probably broken, but we never maintained
+ * a chandef in these cases, so it always was.
+ */
+ return le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ }
- switch (wdev->chandef.width) {
+ switch (chandef->width) {
case NL80211_CHAN_WIDTH_80P80:
tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80p80;
break;
@@ -4836,6 +4916,7 @@ static u16 he_get_txmcsmap(struct genl_info *info,
tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80;
break;
}
+
return le16_to_cpu(tx_mcs);
}
@@ -4843,7 +4924,8 @@ static bool he_set_mcs_mask(struct genl_info *info,
struct wireless_dev *wdev,
struct ieee80211_supported_band *sband,
struct nl80211_txrate_he *txrate,
- u16 mcs[NL80211_HE_NSS_MAX])
+ u16 mcs[NL80211_HE_NSS_MAX],
+ unsigned int link_id)
{
const struct ieee80211_sta_he_cap *he_cap;
u16 tx_mcs_mask[NL80211_HE_NSS_MAX] = {};
@@ -4856,7 +4938,7 @@ static bool he_set_mcs_mask(struct genl_info *info,
memset(mcs, 0, sizeof(u16) * NL80211_HE_NSS_MAX);
- tx_mcs_map = he_get_txmcsmap(info, he_cap);
+ tx_mcs_map = he_get_txmcsmap(info, link_id, he_cap);
/* Build he_mcs_mask from HE capabilities */
he_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
@@ -4876,7 +4958,8 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
enum nl80211_attrs attr,
struct cfg80211_bitrate_mask *mask,
struct net_device *dev,
- bool default_all_enabled)
+ bool default_all_enabled,
+ unsigned int link_id)
{
struct nlattr *tb[NL80211_TXRATE_MAX + 1];
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4913,7 +4996,7 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
if (!he_cap)
continue;
- he_tx_mcs_map = he_get_txmcsmap(info, he_cap);
+ he_tx_mcs_map = he_get_txmcsmap(info, link_id, he_cap);
he_build_mcs_mask(he_tx_mcs_map, mask->control[i].he_mcs);
mask->control[i].he_gi = 0xFF;
@@ -4978,7 +5061,8 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
if (tb[NL80211_TXRATE_HE] &&
!he_set_mcs_mask(info, wdev, sband,
nla_data(tb[NL80211_TXRATE_HE]),
- mask->control[band].he_mcs))
+ mask->control[band].he_mcs,
+ link_id))
return -EINVAL;
if (tb[NL80211_TXRATE_HE_GI])
@@ -5215,6 +5299,8 @@ static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev,
memset(bcn, 0, sizeof(*bcn));
+ bcn->link_id = nl80211_link_id(attrs);
+
if (attrs[NL80211_ATTR_BEACON_HEAD]) {
bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
@@ -5436,7 +5522,7 @@ static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
* HT/VHT requirements/capabilities, we parse them out of the IEs for the
* benefit of drivers that rebuild IEs in the firmware.
*/
-static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
+static int nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
{
const struct cfg80211_beacon_data *bcn = &params->beacon;
size_t ies_len = bcn->tail_len;
@@ -5462,28 +5548,46 @@ static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ies, ies_len);
if (cap && cap->datalen >= sizeof(*params->he_oper) + 1)
params->he_oper = (void *)(cap->data + 1);
+ cap = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_CAPABILITY, ies, ies_len);
+ if (cap) {
+ if (!cap->datalen)
+ return -EINVAL;
+ params->eht_cap = (void *)(cap->data + 1);
+ if (!ieee80211_eht_capa_size_ok((const u8 *)params->he_cap,
+ (const u8 *)params->eht_cap,
+ cap->datalen - 1))
+ return -EINVAL;
+ }
+ cap = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION, ies, ies_len);
+ if (cap) {
+ if (!cap->datalen)
+ return -EINVAL;
+ params->eht_oper = (void *)(cap->data + 1);
+ if (!ieee80211_eht_oper_size_ok((const u8 *)params->eht_oper,
+ cap->datalen - 1))
+ return -EINVAL;
+ }
+ return 0;
}
static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
struct cfg80211_ap_settings *params)
{
struct wireless_dev *wdev;
- bool ret = false;
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (wdev->iftype != NL80211_IFTYPE_AP &&
wdev->iftype != NL80211_IFTYPE_P2P_GO)
continue;
- if (!wdev->preset_chandef.chan)
+ if (!wdev->u.ap.preset_chandef.chan)
continue;
- params->chandef = wdev->preset_chandef;
- ret = true;
- break;
+ params->chandef = wdev->u.ap.preset_chandef;
+ return true;
}
- return ret;
+ return false;
}
static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
@@ -5541,6 +5645,7 @@ static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_ap_settings *params;
@@ -5553,7 +5658,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->start_ap)
return -EOPNOTSUPP;
- if (wdev->beacon_interval)
+ if (wdev->links[link_id].ap.beacon_interval)
return -EALREADY;
/* these are required for START_AP */
@@ -5595,6 +5700,18 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
goto out;
}
+
+ if (wdev->u.ap.ssid_len &&
+ (wdev->u.ap.ssid_len != params->ssid_len ||
+ memcmp(wdev->u.ap.ssid, params->ssid, params->ssid_len))) {
+ /* require identical SSID for MLO */
+ err = -EINVAL;
+ goto out;
+ }
+ } else if (wdev->valid_links) {
+ /* require SSID for MLO */
+ err = -EINVAL;
+ goto out;
}
if (info->attrs[NL80211_ATTR_HIDDEN_SSID])
@@ -5662,8 +5779,12 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
err = nl80211_parse_chandef(rdev, info, &params->chandef);
if (err)
goto out;
- } else if (wdev->preset_chandef.chan) {
- params->chandef = wdev->preset_chandef;
+ } else if (wdev->valid_links) {
+ /* with MLD need to specify the channel configuration */
+ err = -EINVAL;
+ goto out;
+ } else if (wdev->u.ap.preset_chandef.chan) {
+ params->chandef = wdev->u.ap.preset_chandef;
} else if (!nl80211_get_ap_channel(rdev, params)) {
err = -EINVAL;
goto out;
@@ -5675,18 +5796,20 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ wdev_lock(wdev);
+
if (info->attrs[NL80211_ATTR_TX_RATES]) {
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES,
&params->beacon_rate,
- dev, false);
+ dev, false, link_id);
if (err)
- goto out;
+ goto out_unlock;
err = validate_beacon_tx_rate(rdev, params->chandef.chan->band,
&params->beacon_rate);
if (err)
- goto out;
+ goto out_unlock;
}
if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
@@ -5699,19 +5822,19 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!(rdev->wiphy.features &
NL80211_FEATURE_STATIC_SMPS)) {
err = -EINVAL;
- goto out;
+ goto out_unlock;
}
break;
case NL80211_SMPS_DYNAMIC:
if (!(rdev->wiphy.features &
NL80211_FEATURE_DYNAMIC_SMPS)) {
err = -EINVAL;
- goto out;
+ goto out_unlock;
}
break;
default:
err = -EINVAL;
- goto out;
+ goto out_unlock;
}
} else {
params->smps_mode = NL80211_SMPS_OFF;
@@ -5720,7 +5843,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params->pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
if (params->pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) {
err = -EOPNOTSUPP;
- goto out;
+ goto out_unlock;
}
if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -5728,7 +5851,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(params->acl)) {
err = PTR_ERR(params->acl);
params->acl = NULL;
- goto out;
+ goto out_unlock;
}
}
@@ -5740,7 +5863,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL80211_ATTR_HE_OBSS_PD],
&params->he_obss_pd);
if (err)
- goto out;
+ goto out_unlock;
}
if (info->attrs[NL80211_ATTR_FILS_DISCOVERY]) {
@@ -5748,7 +5871,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL80211_ATTR_FILS_DISCOVERY],
params);
if (err)
- goto out;
+ goto out_unlock;
}
if (info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) {
@@ -5756,7 +5879,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
params);
if (err)
- goto out;
+ goto out_unlock;
}
if (info->attrs[NL80211_ATTR_MBSSID_CONFIG]) {
@@ -5767,10 +5890,12 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params->beacon.mbssid_ies->cnt :
0);
if (err)
- goto out;
+ goto out_unlock;
}
- nl80211_calculate_ap_params(params);
+ err = nl80211_calculate_ap_params(params);
+ if (err)
+ goto out_unlock;
if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS])
params->flags = nla_get_u32(
@@ -5778,20 +5903,28 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
else if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
params->flags |= NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
- wdev_lock(wdev);
+ if (wdev->conn_owner_nlportid &&
+ info->attrs[NL80211_ATTR_SOCKET_OWNER] &&
+ wdev->conn_owner_nlportid != info->snd_portid) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* FIXME: validate MLO/link-id against driver capabilities */
+
err = rdev_start_ap(rdev, dev, params);
if (!err) {
- wdev->preset_chandef = params->chandef;
- wdev->beacon_interval = params->beacon_interval;
- wdev->chandef = params->chandef;
- wdev->ssid_len = params->ssid_len;
- memcpy(wdev->ssid, params->ssid, wdev->ssid_len);
+ wdev->links[link_id].ap.beacon_interval = params->beacon_interval;
+ wdev->links[link_id].ap.chandef = params->chandef;
+ wdev->u.ap.ssid_len = params->ssid_len;
+ memcpy(wdev->u.ap.ssid, params->ssid,
+ params->ssid_len);
if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
wdev->conn_owner_nlportid = info->snd_portid;
}
+out_unlock:
wdev_unlock(wdev);
-
out:
kfree(params->acl);
kfree(params->beacon.mbssid_ies);
@@ -5807,6 +5940,7 @@ out:
static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_beacon_data params;
@@ -5819,7 +5953,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->change_beacon)
return -EOPNOTSUPP;
- if (!wdev->beacon_interval)
+ if (!wdev->links[link_id].ap.beacon_interval)
return -EINVAL;
err = nl80211_parse_beacon(rdev, info->attrs, &params);
@@ -5838,9 +5972,10 @@ out:
static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
- return cfg80211_stop_ap(rdev, dev, false);
+ return cfg80211_stop_ap(rdev, dev, link_id, false);
}
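
AP start/stop and beacon changes are now indexed by link: beacon_interval and chandef live in wdev->links[link_id].ap, while the SSID stays shared in wdev->u.ap across all links of the MLD (see the "require identical SSID for MLO" check above). A hedged toy model of that bookkeeping, with simplified fields and a fixed link count:

#include <stdio.h>
#include <string.h>

#define TOY_MAX_LINKS 15

struct toy_wdev {
	/* per-link AP state: each link can beacon independently */
	struct { int beacon_interval; int freq; } links[TOY_MAX_LINKS];
	/* shared across all links of the (toy) MLD */
	char ssid[32];
	int ssid_len;
};

static int toy_start_ap(struct toy_wdev *wdev, int link_id,
			const char *ssid, int beacon_int, int freq)
{
	if (wdev->links[link_id].beacon_interval)
		return -1;	/* -EALREADY: this link is already beaconing */

	/* an MLD must use the same SSID on every link */
	if (wdev->ssid_len &&
	    (wdev->ssid_len != (int)strlen(ssid) ||
	     memcmp(wdev->ssid, ssid, wdev->ssid_len)))
		return -2;	/* -EINVAL in the real code */

	wdev->links[link_id].beacon_interval = beacon_int;
	wdev->links[link_id].freq = freq;
	wdev->ssid_len = (int)strlen(ssid);
	memcpy(wdev->ssid, ssid, wdev->ssid_len);
	return 0;
}

int main(void)
{
	struct toy_wdev wdev = { 0 };

	printf("%d\n", toy_start_ap(&wdev, 0, "toy-ap", 100, 5180));
	printf("%d\n", toy_start_ap(&wdev, 1, "toy-ap", 100, 2437));
	printf("%d\n", toy_start_ap(&wdev, 2, "other-ssid", 100, 2462));
	return 0;
}
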
static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
@@ -7590,7 +7725,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
wdev_lock(wdev);
/* If not connected, get default parameters */
- if (!wdev->mesh_id_len)
+ if (!wdev->u.mesh.id_len)
memcpy(&cur_params, &default_mesh_config, sizeof(cur_params));
else
err = rdev_get_mesh_config(rdev, dev, &cur_params);
@@ -7971,7 +8106,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
return err;
wdev_lock(wdev);
- if (!wdev->mesh_id_len)
+ if (!wdev->u.mesh.id_len)
err = -ENOLINK;
if (!err)
@@ -8463,14 +8598,44 @@ int nl80211_parse_random_mac(struct nlattr **attrs,
return 0;
}
-static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev)
+static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan)
{
+ unsigned int link_id;
+ bool all_ok = true;
+
ASSERT_WDEV_LOCK(wdev);
if (!cfg80211_beaconing_iface_active(wdev))
return true;
- if (!(wdev->chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ /*
+ * FIXME: check if we have a free HW resource/link for chan
+ *
+ * This, as well as the FIXME below, requires knowing the link
+ * capabilities of the hardware.
+ */
+
+ /* we cannot leave radar channels */
+ for_each_valid_link(wdev, link_id) {
+ struct cfg80211_chan_def *chandef;
+
+ chandef = wdev_chandef(wdev, link_id);
+ if (!chandef)
+ continue;
+
+ /*
+ * FIXME: don't require all_ok, but rather check only the
+ * correct HW resource/link onto which 'chan' falls,
+ * as only that link leaves the channel for doing
+ * the off-channel operation.
+ */
+
+ if (chandef->chan->flags & IEEE80211_CHAN_RADAR)
+ all_ok = false;
+ }
+
+ if (all_ok)
return true;
return regulatory_pre_cac_allowed(wdev->wiphy);
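
Where the old check looked only at the single wdev->chandef, the rewritten cfg80211_off_channel_oper_allowed() walks every valid link and only falls back to the pre-CAC question when some link sits on a radar channel. A toy stand-alone model of that loop (simplified link array and channel flags, not the real chandef types):

#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_LINKS	15
#define TOY_CHAN_RADAR	0x1

struct toy_link { bool valid; unsigned int chan_flags; };

struct toy_wdev {
	bool beaconing;
	bool pre_cac_allowed;
	struct toy_link links[TOY_MAX_LINKS];
};

static bool toy_off_channel_allowed(const struct toy_wdev *wdev)
{
	bool all_ok = true;
	int i;

	if (!wdev->beaconing)
		return true;		/* nothing to keep on-channel */

	/* we cannot leave radar channels: scan every valid link */
	for (i = 0; i < TOY_MAX_LINKS; i++) {
		if (!wdev->links[i].valid)
			continue;
		if (wdev->links[i].chan_flags & TOY_CHAN_RADAR)
			all_ok = false;
	}

	if (all_ok)
		return true;

	return wdev->pre_cac_allowed;	/* regulatory_pre_cac_allowed() stand-in */
}

int main(void)
{
	struct toy_wdev wdev = {
		.beaconing = true,
		.links = { [0] = { true, 0 }, [1] = { true, TOY_CHAN_RADAR } },
	};

	printf("allowed: %d\n", toy_off_channel_allowed(&wdev));
	return 0;
}
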
@@ -8553,7 +8718,7 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
int err;
if (!(wiphy->features & randomness_flag) ||
- (wdev && wdev->current_bss))
+ (wdev && wdev->connected))
return -EOPNOTSUPP;
err = nl80211_parse_random_mac(attrs, mac_addr, mac_addr_mask);
@@ -8690,17 +8855,14 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->n_channels = i;
wdev_lock(wdev);
- if (!cfg80211_off_channel_oper_allowed(wdev)) {
- struct ieee80211_channel *chan;
+ for (i = 0; i < request->n_channels; i++) {
+ struct ieee80211_channel *chan = request->channels[i];
- if (request->n_channels != 1) {
- wdev_unlock(wdev);
- err = -EBUSY;
- goto out_free;
- }
+ /* if we can go off-channel to the target channel we're good */
+ if (cfg80211_off_channel_oper_allowed(wdev, chan))
+ continue;
- chan = request->channels[0];
- if (chan->center_freq != wdev->chandef.chan->center_freq) {
+ if (!cfg80211_wdev_on_sub_chan(wdev, chan, true)) {
wdev_unlock(wdev);
err = -EBUSY;
goto out_free;
@@ -9445,7 +9607,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms);
if (!err) {
- wdev->chandef = chandef;
+ wdev->links[0].ap.chandef = chandef;
wdev->cac_started = true;
wdev->cac_start_time = jiffies;
wdev->cac_time_ms = cac_time_ms;
@@ -9513,6 +9675,7 @@ static int nl80211_notify_radar_detection(struct sk_buff *skb,
static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_csa_settings params;
@@ -9539,15 +9702,15 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
need_handle_dfs_flag = false;
/* useless if AP is not running */
- if (!wdev->beacon_interval)
+ if (!wdev->links[link_id].ap.beacon_interval)
return -ENOTCONN;
break;
case NL80211_IFTYPE_ADHOC:
- if (!wdev->ssid_len)
+ if (!wdev->u.ibss.ssid_len)
return -ENOTCONN;
break;
case NL80211_IFTYPE_MESH_POINT:
- if (!wdev->mesh_id_len)
+ if (!wdev->u.mesh.id_len)
return -ENOTCONN;
break;
default:
@@ -9718,6 +9881,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
{
struct cfg80211_bss *res = &intbss->pub;
const struct cfg80211_bss_ies *ies;
+ unsigned int link_id;
void *hdr;
struct nlattr *bss;
@@ -9822,13 +9986,18 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- if (intbss == wdev->current_bss &&
- nla_put_u32(msg, NL80211_BSS_STATUS,
- NL80211_BSS_STATUS_ASSOCIATED))
- goto nla_put_failure;
+ for_each_valid_link(wdev, link_id) {
+ if (intbss == wdev->links[link_id].client.current_bss &&
+ (nla_put_u32(msg, NL80211_BSS_STATUS,
+ NL80211_BSS_STATUS_ASSOCIATED) ||
+ (wdev->valid_links &&
+ nla_put_u8(msg, NL80211_BSS_MLO_LINK_ID,
+ link_id))))
+ goto nla_put_failure;
+ }
break;
case NL80211_IFTYPE_ADHOC:
- if (intbss == wdev->current_bss &&
+ if (intbss == wdev->u.ibss.current_bss &&
nla_put_u32(msg, NL80211_BSS_STATUS,
NL80211_BSS_STATUS_IBSS_JOINED))
goto nla_put_failure;
@@ -10054,11 +10223,12 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct ieee80211_channel *chan;
- const u8 *bssid, *ssid, *ie = NULL, *auth_data = NULL;
- int err, ssid_len, ie_len = 0, auth_data_len = 0;
+ const u8 *bssid, *ssid;
+ int err, ssid_len;
enum nl80211_auth_type auth_type;
struct key_parse key;
bool local_state_change;
+ struct cfg80211_auth_request req = {};
u32 freq;
if (!info->attrs[NL80211_ATTR_MAC])
@@ -10129,8 +10299,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
if (info->attrs[NL80211_ATTR_IE]) {
- ie = nla_data(info->attrs[NL80211_ATTR_IE]);
- ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
@@ -10150,8 +10320,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
auth_type != NL80211_AUTHTYPE_FILS_SK_PFS &&
auth_type != NL80211_AUTHTYPE_FILS_PK)
return -EINVAL;
- auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]);
- auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]);
+ req.auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]);
+ req.auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]);
}
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
@@ -10163,12 +10333,29 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
if (local_state_change)
return 0;
+ req.auth_type = auth_type;
+ req.key = key.p.key;
+ req.key_len = key.p.key_len;
+ req.key_idx = key.idx;
+ req.link_id = nl80211_link_id_or_invalid(info->attrs);
+ if (req.link_id >= 0) {
+ if (!info->attrs[NL80211_ATTR_MLD_ADDR])
+ return -EINVAL;
+ req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
+ }
+
+ req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
+ if (!req.bss)
+ return -ENOENT;
+
wdev_lock(dev->ieee80211_ptr);
- err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
- ssid, ssid_len, ie, ie_len,
- key.p.key, key.p.key_len, key.idx,
- auth_data, auth_data_len);
+ err = cfg80211_mlme_auth(rdev, dev, &req);
wdev_unlock(dev->ieee80211_ptr);
+
+ cfg80211_put_bss(&rdev->wiphy, req.bss);
+
return err;
}
@@ -10272,7 +10459,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
if (len % sizeof(u32))
return -EINVAL;
- if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES)
+ if (settings->n_akm_suites > rdev->wiphy.max_num_akm_suites)
return -EINVAL;
memcpy(settings->akm_suites, data, len);
@@ -10310,23 +10497,55 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return 0;
}
+static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
+ const u8 *ssid, int ssid_len,
+ struct nlattr **attrs,
+ const u8 **bssid_out)
+{
+ struct ieee80211_channel *chan;
+ struct cfg80211_bss *bss;
+ const u8 *bssid;
+ u32 freq;
+
+ if (!attrs[NL80211_ATTR_MAC] || !attrs[NL80211_ATTR_WIPHY_FREQ])
+ return ERR_PTR(-EINVAL);
+
+ bssid = nla_data(attrs[NL80211_ATTR_MAC]);
+
+ freq = MHZ_TO_KHZ(nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]));
+ if (attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET])
+ freq += nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]);
+
+ chan = nl80211_get_valid_chan(&rdev->wiphy, freq);
+ if (!chan)
+ return ERR_PTR(-EINVAL);
+
+ bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid,
+ ssid, ssid_len,
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
+ if (!bss)
+ return ERR_PTR(-ENOENT);
+
+ *bssid_out = bssid;
+ return bss;
+}
+
static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- struct ieee80211_channel *chan;
struct cfg80211_assoc_request req = {};
+ struct nlattr **attrs = NULL;
const u8 *bssid, *ssid;
- int err, ssid_len = 0;
- u32 freq;
+ unsigned int link_id;
+ int err, ssid_len;
if (dev->ieee80211_ptr->conn_owner_nlportid &&
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!info->attrs[NL80211_ATTR_MAC] ||
- !info->attrs[NL80211_ATTR_SSID] ||
- !info->attrs[NL80211_ATTR_WIPHY_FREQ])
+ if (!info->attrs[NL80211_ATTR_SSID])
return -EINVAL;
if (!rdev->ops->assoc)
@@ -10336,16 +10555,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
return -EOPNOTSUPP;
- bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
-
- freq = MHZ_TO_KHZ(nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET])
- freq +=
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]);
- chan = nl80211_get_valid_chan(&rdev->wiphy, freq);
- if (!chan)
- return -EINVAL;
-
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -10439,12 +10648,87 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
sizeof(req.s1g_capa));
}
+ req.link_id = nl80211_link_id_or_invalid(info->attrs);
+
+ if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
+ unsigned int attrsize = NUM_NL80211_ATTR * sizeof(*attrs);
+ struct nlattr *link;
+ int rem = 0;
+
+ if (req.link_id < 0)
+ return -EINVAL;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_MLO))
+ return -EINVAL;
+
+ if (info->attrs[NL80211_ATTR_MAC] ||
+ info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
+ !info->attrs[NL80211_ATTR_MLD_ADDR])
+ return -EINVAL;
+
+ req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
+
+ attrs = kzalloc(attrsize, GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ nla_for_each_nested(link,
+ info->attrs[NL80211_ATTR_MLO_LINKS],
+ rem) {
+ memset(attrs, 0, attrsize);
+
+ nla_parse_nested(attrs, NL80211_ATTR_MAX,
+ link, NULL, NULL);
+
+ if (!attrs[NL80211_ATTR_MLO_LINK_ID]) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ link_id = nla_get_u8(attrs[NL80211_ATTR_MLO_LINK_ID]);
+ /* cannot use the same link ID again */
+ if (req.links[link_id].bss) {
+ err = -EINVAL;
+ goto free;
+ }
+ req.links[link_id].bss =
+ nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
+ &bssid);
+ if (IS_ERR(req.links[link_id].bss)) {
+ err = PTR_ERR(req.links[link_id].bss);
+ goto free;
+ }
+
+ if (attrs[NL80211_ATTR_IE]) {
+ req.links[link_id].elems =
+ nla_data(attrs[NL80211_ATTR_IE]);
+ req.links[link_id].elems_len =
+ nla_len(attrs[NL80211_ATTR_IE]);
+ }
+ }
+
+ if (!req.links[req.link_id].bss) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ kfree(attrs);
+ attrs = NULL;
+ } else {
+ if (req.link_id >= 0)
+ return -EINVAL;
+
+ req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
+ &bssid);
+ if (IS_ERR(req.bss))
+ return PTR_ERR(req.bss);
+ }
+
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
if (!err) {
wdev_lock(dev->ieee80211_ptr);
- err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
- ssid, ssid_len, &req);
+ err = cfg80211_mlme_assoc(rdev, dev, &req);
if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
dev->ieee80211_ptr->conn_owner_nlportid =
@@ -10456,6 +10740,12 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
wdev_unlock(dev->ieee80211_ptr);
}
+free:
+ for (link_id = 0; link_id < ARRAY_SIZE(req.links); link_id++)
+ cfg80211_put_bss(&rdev->wiphy, req.links[link_id].bss);
+ cfg80211_put_bss(&rdev->wiphy, req.bss);
+ kfree(attrs);
+
return err;
}
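
After cfg80211_mlme_assoc() the handler drops its own references in one epilogue shared by the success and error paths: one put per req.links[] slot plus one for req.bss, relying on the put being a no-op for unset (NULL) entries, as cfg80211_put_bss() is. A toy stand-alone version of that NULL-safe release pattern (simplified refcounting, not the real cfg80211_put_bss()):

#include <stdlib.h>
#include <stdio.h>

#define TOY_MAX_LINKS 15

/* toy refcounted object with a NULL-safe release */
struct toy_bss { int refs; };

static void toy_put(struct toy_bss *bss)
{
	if (!bss)
		return;			/* NULL-safe: unset links cost nothing */
	if (--bss->refs == 0)
		free(bss);
}

static struct toy_bss *toy_get(void)
{
	struct toy_bss *bss = calloc(1, sizeof(*bss));

	if (bss)
		bss->refs = 1;
	return bss;
}

int main(void)
{
	struct toy_bss *links[TOY_MAX_LINKS] = { NULL };
	struct toy_bss *bss = NULL;
	int i, err = 0;

	/* only some slots get filled before a (simulated) failure */
	links[1] = toy_get();
	links[3] = toy_get();
	err = -22;			/* pretend parsing a later link failed */

	/* one epilogue for success and error alike: put every slot */
	for (i = 0; i < TOY_MAX_LINKS; i++)
		toy_put(links[i]);
	toy_put(bss);

	printf("err=%d\n", err);
	return 0;
}
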
@@ -11269,6 +11559,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
connect.flags |= CONNECT_REQ_EXTERNAL_AUTH_SUPPORT;
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_MLO_SUPPORT]))
+ connect.flags |= CONNECT_REQ_MLO_SUPPORT;
+
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_connect(rdev, dev, &connect, connkeys,
@@ -11362,7 +11655,7 @@ static int nl80211_update_connect_params(struct sk_buff *skb,
}
wdev_lock(dev->ieee80211_ptr);
- if (!wdev->current_bss)
+ if (!wdev->connected)
ret = -ENOLINK;
else
ret = rdev_update_connect_params(rdev, dev, &connect, changed);
@@ -11575,9 +11868,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct wireless_dev *wdev = info->user_ptr[1];
struct cfg80211_chan_def chandef;
- const struct cfg80211_chan_def *compat_chandef;
struct sk_buff *msg;
void *hdr;
u64 cookie;
@@ -11607,10 +11900,22 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
return err;
wdev_lock(wdev);
- if (!cfg80211_off_channel_oper_allowed(wdev) &&
- !cfg80211_chandef_identical(&wdev->chandef, &chandef)) {
- compat_chandef = cfg80211_chandef_compatible(&wdev->chandef,
- &chandef);
+ if (!cfg80211_off_channel_oper_allowed(wdev, chandef.chan)) {
+ const struct cfg80211_chan_def *oper_chandef, *compat_chandef;
+
+ oper_chandef = wdev_chandef(wdev, link_id);
+
+ if (WARN_ON(!oper_chandef)) {
+ /* cannot happen since we must beacon to get here */
+ WARN_ON(1);
+ wdev_unlock(wdev);
+ return -EBUSY;
+ }
+
+ /* note: returns first one if identical chandefs */
+ compat_chandef = cfg80211_chandef_compatible(&chandef,
+ oper_chandef);
+
if (compat_chandef != &chandef) {
wdev_unlock(wdev);
return -EBUSY;
@@ -11672,6 +11977,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_bitrate_mask mask;
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -11683,11 +11989,11 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
wdev_lock(wdev);
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES, &mask,
- dev, true);
+ dev, true, link_id);
if (err)
goto out;
- err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+ err = rdev_set_bitrate_mask(rdev, dev, link_id, NULL, &mask);
out:
wdev_unlock(wdev);
return err;
@@ -11812,7 +12118,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
wdev_lock(wdev);
- if (params.offchan && !cfg80211_off_channel_oper_allowed(wdev)) {
+ if (params.offchan &&
+ !cfg80211_off_channel_oper_allowed(wdev, chandef.chan)) {
wdev_unlock(wdev);
return -EBUSY;
}
@@ -12030,12 +12337,13 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
* connection is established and enough beacons received to calculate
* the average.
*/
- if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss &&
+ if (!wdev->cqm_config->last_rssi_event_value &&
+ wdev->links[0].client.current_bss &&
rdev->ops->get_station) {
struct station_info sinfo = {};
u8 *mac_addr;
- mac_addr = wdev->current_bss->pub.bssid;
+ mac_addr = wdev->links[0].client.current_bss->pub.bssid;
err = rdev_get_station(rdev, dev, mac_addr, &sinfo);
if (err)
@@ -12298,7 +12606,7 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES,
&setup.beacon_rate,
- dev, false);
+ dev, false, 0);
if (err)
return err;
@@ -13268,7 +13576,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
rekey_data.akm = nla_get_u32(tb[NL80211_REKEY_DATA_AKM]);
wdev_lock(wdev);
- if (!wdev->current_bss) {
+ if (!wdev->connected) {
err = -ENOTCONN;
goto out;
}
@@ -14537,7 +14845,7 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- if (wdev->current_bss)
+ if (wdev->connected)
break;
err = -ENOTCONN;
goto out;
@@ -14710,13 +15018,13 @@ static int nl80211_set_pmk(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
wdev_lock(wdev);
- if (!wdev->current_bss) {
+ if (!wdev->connected) {
ret = -ENOTCONN;
goto out;
}
pmk_conf.aa = nla_data(info->attrs[NL80211_ATTR_MAC]);
- if (memcmp(pmk_conf.aa, wdev->current_bss->pub.bssid, ETH_ALEN)) {
+ if (memcmp(pmk_conf.aa, wdev->u.client.connected_addr, ETH_ALEN)) {
ret = -EINVAL;
goto out;
}
@@ -14844,9 +15152,13 @@ static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_MESH_POINT:
break;
case NL80211_IFTYPE_ADHOC:
+ if (wdev->u.ibss.current_bss)
+ break;
+ err = -ENOTCONN;
+ goto out;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- if (wdev->current_bss)
+ if (wdev->connected)
break;
err = -ENOTCONN;
goto out;
@@ -14882,12 +15194,14 @@ static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_ftm_responder_stats ftm_stats = {};
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct sk_buff *msg;
void *hdr;
struct nlattr *ftm_stats_attr;
int err;
- if (wdev->iftype != NL80211_IFTYPE_AP || !wdev->beacon_interval)
+ if (wdev->iftype != NL80211_IFTYPE_AP ||
+ !wdev->links[link_id].ap.beacon_interval)
return -EOPNOTSUPP;
err = rdev_get_ftm_responder_stats(rdev, dev, &ftm_stats);
@@ -15017,7 +15331,8 @@ static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info)
static int parse_tid_conf(struct cfg80211_registered_device *rdev,
struct nlattr *attrs[], struct net_device *dev,
struct cfg80211_tid_cfg *tid_conf,
- struct genl_info *info, const u8 *peer)
+ struct genl_info *info, const u8 *peer,
+ unsigned int link_id)
{
struct netlink_ext_ack *extack = info->extack;
u64 mask;
@@ -15092,7 +15407,7 @@ static int parse_tid_conf(struct cfg80211_registered_device *rdev,
attr = NL80211_TID_CONFIG_ATTR_TX_RATE;
err = nl80211_parse_tx_bitrate_mask(info, attrs, attr,
&tid_conf->txrate_mask, dev,
- true);
+ true, link_id);
if (err)
return err;
@@ -15119,6 +15434,7 @@ static int nl80211_set_tid_config(struct sk_buff *skb,
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct nlattr *attrs[NL80211_TID_CONFIG_ATTR_MAX + 1];
+ unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct cfg80211_tid_config *tid_config;
struct nlattr *tid;
@@ -15146,6 +15462,8 @@ static int nl80211_set_tid_config(struct sk_buff *skb,
if (info->attrs[NL80211_ATTR_MAC])
tid_config->peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ wdev_lock(dev->ieee80211_ptr);
+
nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG],
rem_conf) {
ret = nla_parse_nested(attrs, NL80211_TID_CONFIG_ATTR_MAX,
@@ -15156,7 +15474,7 @@ static int nl80211_set_tid_config(struct sk_buff *skb,
ret = parse_tid_conf(rdev, attrs, dev,
&tid_config->tid_conf[conf_idx],
- info, tid_config->peer);
+ info, tid_config->peer, link_id);
if (ret)
goto bad_tid_conf;
@@ -15167,6 +15485,7 @@ static int nl80211_set_tid_config(struct sk_buff *skb,
bad_tid_conf:
kfree(tid_config);
+ wdev_unlock(dev->ieee80211_ptr);
return ret;
}
@@ -15295,6 +15614,74 @@ static int nl80211_set_fils_aad(struct sk_buff *skb,
return rdev_set_fils_aad(rdev, dev, &fils_aad);
}
+static int nl80211_add_link(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ unsigned int link_id = nl80211_link_id(info->attrs);
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int ret;
+
+ if (!(wdev->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
+ return -EINVAL;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MAC] ||
+ !is_valid_ether_addr(nla_data(info->attrs[NL80211_ATTR_MAC])))
+ return -EINVAL;
+
+ wdev_lock(wdev);
+ wdev->valid_links |= BIT(link_id);
+ ether_addr_copy(wdev->links[link_id].addr,
+ nla_data(info->attrs[NL80211_ATTR_MAC]));
+
+ ret = rdev_add_intf_link(rdev, wdev, link_id);
+ if (ret) {
+ wdev->valid_links &= ~BIT(link_id);
+ eth_zero_addr(wdev->links[link_id].addr);
+ }
+ wdev_unlock(wdev);
+
+ return ret;
+}
+
+static int nl80211_remove_link(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ unsigned int link_id = nl80211_link_id(info->attrs);
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ /* cannot remove if there's no link */
+ if (!info->attrs[NL80211_ATTR_MLO_LINK_ID])
+ return -EINVAL;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* FIXME: stop the link operations first */
+
+ wdev_lock(wdev);
+ wdev->valid_links &= ~BIT(link_id);
+
+ rdev_del_intf_link(rdev, wdev, link_id);
+
+ eth_zero_addr(wdev->links[link_id].addr);
+ wdev_unlock(wdev);
+
+ return 0;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -15307,6 +15694,8 @@ static int nl80211_set_fils_aad(struct sk_buff *skb,
NL80211_FLAG_CHECK_NETDEV_UP)
#define NL80211_FLAG_CLEAR_SKB 0x20
#define NL80211_FLAG_NO_WIPHY_MTX 0x40
+#define NL80211_FLAG_MLO_VALID_LINK_ID 0x80
+#define NL80211_FLAG_MLO_UNSUPPORTED 0x100
#define INTERNAL_FLAG_SELECTORS(__sel) \
SELECTOR(__sel, NONE, 0) /* must be first */ \
@@ -15316,6 +15705,12 @@ static int nl80211_set_fils_aad(struct sk_buff *skb,
NL80211_FLAG_NEED_WDEV) \
SELECTOR(__sel, NETDEV, \
NL80211_FLAG_NEED_NETDEV) \
+ SELECTOR(__sel, NETDEV_LINK, \
+ NL80211_FLAG_NEED_NETDEV | \
+ NL80211_FLAG_MLO_VALID_LINK_ID) \
+ SELECTOR(__sel, NETDEV_NO_MLO, \
+ NL80211_FLAG_NEED_NETDEV | \
+ NL80211_FLAG_MLO_UNSUPPORTED) \
SELECTOR(__sel, WIPHY_RTNL, \
NL80211_FLAG_NEED_WIPHY | \
NL80211_FLAG_NEED_RTNL) \
@@ -15331,14 +15726,31 @@ static int nl80211_set_fils_aad(struct sk_buff *skb,
NL80211_FLAG_NEED_RTNL) \
SELECTOR(__sel, NETDEV_UP, \
NL80211_FLAG_NEED_NETDEV_UP) \
+ SELECTOR(__sel, NETDEV_UP_LINK, \
+ NL80211_FLAG_NEED_NETDEV_UP | \
+ NL80211_FLAG_MLO_VALID_LINK_ID) \
+ SELECTOR(__sel, NETDEV_UP_NO_MLO, \
+ NL80211_FLAG_NEED_NETDEV_UP | \
+ NL80211_FLAG_MLO_UNSUPPORTED) \
+ SELECTOR(__sel, NETDEV_UP_NO_MLO_CLEAR, \
+ NL80211_FLAG_NEED_NETDEV_UP | \
+ NL80211_FLAG_CLEAR_SKB | \
+ NL80211_FLAG_MLO_UNSUPPORTED) \
SELECTOR(__sel, NETDEV_UP_NOTMX, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_NO_WIPHY_MTX) \
+ SELECTOR(__sel, NETDEV_UP_NOTMX_NOMLO, \
+ NL80211_FLAG_NEED_NETDEV_UP | \
+ NL80211_FLAG_NO_WIPHY_MTX | \
+ NL80211_FLAG_MLO_UNSUPPORTED) \
SELECTOR(__sel, NETDEV_UP_CLEAR, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_CLEAR_SKB) \
SELECTOR(__sel, WDEV_UP, \
NL80211_FLAG_NEED_WDEV_UP) \
+ SELECTOR(__sel, WDEV_UP_LINK, \
+ NL80211_FLAG_NEED_WDEV_UP | \
+ NL80211_FLAG_MLO_VALID_LINK_ID) \
SELECTOR(__sel, WDEV_UP_RTNL, \
NL80211_FLAG_NEED_WDEV_UP | \
NL80211_FLAG_NEED_RTNL) \
@@ -15362,9 +15774,10 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = NULL;
- struct wireless_dev *wdev;
- struct net_device *dev;
+ struct wireless_dev *wdev = NULL;
+ struct net_device *dev = NULL;
u32 internal_flags;
+ int err;
if (WARN_ON(ops->internal_flags >= ARRAY_SIZE(nl80211_internal_flags)))
return -EINVAL;
@@ -15375,8 +15788,8 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
if (internal_flags & NL80211_FLAG_NEED_WIPHY) {
rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
if (IS_ERR(rdev)) {
- rtnl_unlock();
- return PTR_ERR(rdev);
+ err = PTR_ERR(rdev);
+ goto out_unlock;
}
info->user_ptr[0] = rdev;
} else if (internal_flags & NL80211_FLAG_NEED_NETDEV ||
@@ -15384,17 +15797,18 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
wdev = __cfg80211_wdev_from_attrs(NULL, genl_info_net(info),
info->attrs);
if (IS_ERR(wdev)) {
- rtnl_unlock();
- return PTR_ERR(wdev);
+ err = PTR_ERR(wdev);
+ goto out_unlock;
}
dev = wdev->netdev;
+ dev_hold(dev);
rdev = wiphy_to_rdev(wdev->wiphy);
if (internal_flags & NL80211_FLAG_NEED_NETDEV) {
if (!dev) {
- rtnl_unlock();
- return -EINVAL;
+ err = -EINVAL;
+ goto out_unlock;
}
info->user_ptr[1] = dev;
@@ -15404,14 +15818,44 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
if (internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
!wdev_running(wdev)) {
- rtnl_unlock();
- return -ENETDOWN;
+ err = -ENETDOWN;
+ goto out_unlock;
}
- dev_hold(dev);
info->user_ptr[0] = rdev;
}
+ if (internal_flags & NL80211_FLAG_MLO_VALID_LINK_ID) {
+ struct nlattr *link_id = info->attrs[NL80211_ATTR_MLO_LINK_ID];
+
+ if (!wdev) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* MLO -> require valid link ID */
+ if (wdev->valid_links &&
+ (!link_id ||
+ !(wdev->valid_links & BIT(nla_get_u8(link_id))))) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* non-MLO -> no link ID attribute accepted */
+ if (!wdev->valid_links && link_id) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ if (internal_flags & NL80211_FLAG_MLO_UNSUPPORTED) {
+ if (info->attrs[NL80211_ATTR_MLO_LINK_ID] ||
+ (wdev && wdev->valid_links)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
if (rdev && !(internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
wiphy_lock(&rdev->wiphy);
/* we keep the mutex locked until post_doit */
@@ -15421,6 +15865,10 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
rtnl_unlock();
return 0;
+out_unlock:
+ rtnl_unlock();
+ dev_put(dev);
+ return err;
}
static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
@@ -15636,6 +16084,7 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_key,
.flags = GENL_UNS_ADMIN_PERM,
+ /* cannot use NL80211_FLAG_MLO_VALID_LINK_ID, depends on key */
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_CLEAR_SKB),
},
@@ -15659,21 +16108,24 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = nl80211_set_beacon,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_START_AP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = nl80211_start_ap,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_STOP_AP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = nl80211_stop_ap,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_GET_STATION,
@@ -15939,7 +16391,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_remain_on_channel,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
+ /* FIXME: requiring a link ID here is probably not good */
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
@@ -15953,7 +16407,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_tx_bitrate_mask,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_REGISTER_FRAME,
@@ -16002,7 +16457,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_channel,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_JOIN_MESH,
@@ -16163,7 +16619,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_mac_acl,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_MLO_UNSUPPORTED),
},
{
.cmd = NL80211_CMD_RADAR_DETECT,
@@ -16171,7 +16628,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.doit = nl80211_start_radar_detection,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NO_WIPHY_MTX),
+ NL80211_FLAG_NO_WIPHY_MTX |
+ NL80211_FLAG_MLO_UNSUPPORTED),
},
{
.cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
@@ -16217,7 +16675,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_channel_switch,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_VENDOR,
@@ -16240,7 +16699,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_add_tx_ts,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_UNSUPPORTED),
},
{
.cmd = NL80211_CMD_DEL_TX_TS,
@@ -16301,7 +16761,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_ftm_responder_stats,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_PEER_MEASUREMENT_START,
@@ -16333,7 +16794,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.cmd = NL80211_CMD_SET_TID_CONFIG,
.doit = nl80211_set_tid_config,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_SET_SAR_SPECS,
@@ -16357,6 +16819,19 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
+ {
+ .cmd = NL80211_CMD_ADD_LINK,
+ .doit = nl80211_add_link,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ },
+ {
+ .cmd = NL80211_CMD_REMOVE_LINK,
+ .doit = nl80211_remove_link,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
@@ -16865,10 +17340,29 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
{
struct sk_buff *msg;
void *hdr;
+ unsigned int link;
+ size_t link_info_size = 0;
+ const u8 *connected_addr = cr->valid_links ?
+ cr->ap_mld_addr : cr->links[0].bssid;
+
+ if (cr->valid_links) {
+ for_each_valid_link(cr, link) {
+ /* Nested attribute header */
+ link_info_size += NLA_HDRLEN;
+ /* Link ID */
+ link_info_size += nla_total_size(sizeof(u8));
+ link_info_size += cr->links[link].addr ?
+ nla_total_size(ETH_ALEN) : 0;
+ link_info_size += (cr->links[link].bssid ||
+ cr->links[link].bss) ?
+ nla_total_size(ETH_ALEN) : 0;
+ }
+ }
msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
cr->fils.kek_len + cr->fils.pmk_len +
- (cr->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
+ (cr->fils.pmkid ? WLAN_PMKID_LEN : 0) + link_info_size,
+ gfp);
if (!msg)
return;
@@ -16880,8 +17374,8 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
- (cr->bssid &&
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) ||
+ (connected_addr &&
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, connected_addr)) ||
nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
cr->status) ||
@@ -16907,6 +17401,38 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->fils.pmkid)))))
goto nla_put_failure;
+ if (cr->valid_links) {
+ int i = 1;
+ struct nlattr *nested;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS);
+ if (!nested)
+ goto nla_put_failure;
+
+ for_each_valid_link(cr, link) {
+ struct nlattr *nested_mlo_links;
+ const u8 *bssid = cr->links[link].bss ?
+ cr->links[link].bss->bssid :
+ cr->links[link].bssid;
+
+ nested_mlo_links = nla_nest_start(msg, i);
+ if (!nested_mlo_links)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link) ||
+ (bssid &&
+ nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, bssid)) ||
+ (cr->links[link].addr &&
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
+ cr->links[link].addr)))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nested_mlo_links);
+ i++;
+ }
+ nla_nest_end(msg, nested);
+ }
+
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -16923,11 +17449,32 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
{
struct sk_buff *msg;
void *hdr;
- const u8 *bssid = info->bss ? info->bss->bssid : info->bssid;
+ size_t link_info_size = 0;
+ unsigned int link;
+ const u8 *connected_addr = info->ap_mld_addr ?
+ info->ap_mld_addr :
+ (info->links[0].bss ?
+ info->links[0].bss->bssid :
+ info->links[0].bssid);
+
+ if (info->valid_links) {
+ for_each_valid_link(info, link) {
+ /* Nested attribute header */
+ link_info_size += NLA_HDRLEN;
+ /* Link ID */
+ link_info_size += nla_total_size(sizeof(u8));
+ link_info_size += info->links[link].addr ?
+ nla_total_size(ETH_ALEN) : 0;
+ link_info_size += (info->links[link].bssid ||
+ info->links[link].bss) ?
+ nla_total_size(ETH_ALEN) : 0;
+ }
+ }
msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len +
info->fils.kek_len + info->fils.pmk_len +
- (info->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
+ (info->fils.pmkid ? WLAN_PMKID_LEN : 0) +
+ link_info_size, gfp);
if (!msg)
return;
@@ -16939,7 +17486,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, connected_addr) ||
(info->req_ie &&
nla_put(msg, NL80211_ATTR_REQ_IE, info->req_ie_len,
info->req_ie)) ||
@@ -16958,6 +17505,38 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, info->fils.pmkid)))
goto nla_put_failure;
+ if (info->valid_links) {
+ int i = 1;
+ struct nlattr *nested;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS);
+ if (!nested)
+ goto nla_put_failure;
+
+ for_each_valid_link(info, link) {
+ struct nlattr *nested_mlo_links;
+ const u8 *bssid = info->links[link].bss ?
+ info->links[link].bss->bssid :
+ info->links[link].bssid;
+
+ nested_mlo_links = nla_nest_start(msg, i);
+ if (!nested_mlo_links)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link) ||
+ (bssid &&
+ nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, bssid)) ||
+ (info->links[link].addr &&
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
+ info->links[link].addr)))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nested_mlo_links);
+ i++;
+ }
+ nla_nest_end(msg, nested);
+ }
+
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -17984,23 +18563,40 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
}
void cfg80211_ch_switch_notify(struct net_device *dev,
- struct cfg80211_chan_def *chandef)
+ struct cfg80211_chan_def *chandef,
+ unsigned int link_id)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
ASSERT_WDEV_LOCK(wdev);
+ WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_notify(dev, chandef);
-
- wdev->chandef = *chandef;
- wdev->preset_chandef = *chandef;
+ trace_cfg80211_ch_switch_notify(dev, chandef, link_id);
- if ((wdev->iftype == NL80211_IFTYPE_STATION ||
- wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
- !WARN_ON(!wdev->current_bss))
- cfg80211_update_assoc_bss_entry(wdev, chandef->chan);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (!WARN_ON(!wdev->links[link_id].client.current_bss))
+ cfg80211_update_assoc_bss_entry(wdev, link_id,
+ chandef->chan);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ wdev->u.mesh.chandef = *chandef;
+ wdev->u.mesh.preset_chandef = *chandef;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ wdev->links[link_id].ap.chandef = *chandef;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ wdev->u.ibss.chandef = *chandef;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
cfg80211_sched_dfs_chan_update(rdev);
diff --git a/net/wireless/ocb.c b/net/wireless/ocb.c
index 2d26a6d980bf..27a1732264f9 100644
--- a/net/wireless/ocb.c
+++ b/net/wireless/ocb.c
@@ -4,6 +4,7 @@
*
* Copyright: (c) 2014 Czech Technical University in Prague
* (c) 2014 Volkswagen Group Research
+ * Copyright (C) 2022 Intel Corporation
* Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
* Funded by: Volkswagen Group Research
*/
@@ -34,7 +35,7 @@ int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
err = rdev_join_ocb(rdev, dev, setup);
if (!err)
- wdev->chandef = setup->chandef;
+ wdev->u.ocb.chandef = setup->chandef;
return err;
}
@@ -69,7 +70,7 @@ int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
err = rdev_leave_ocb(rdev, dev);
if (!err)
- memset(&wdev->chandef, 0, sizeof(wdev->chandef));
+ memset(&wdev->u.ocb.chandef, 0, sizeof(wdev->u.ocb.chandef));
return err;
}
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 439bcf52369c..a329ba036989 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018, 2021-2022 Intel Corporation
+ */
#ifndef __CFG80211_RDEV_OPS
#define __CFG80211_RDEV_OPS
@@ -172,11 +177,11 @@ static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev,
}
static inline int rdev_stop_ap(struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct net_device *dev, unsigned int link_id)
{
int ret;
- trace_rdev_stop_ap(&rdev->wiphy, dev);
- ret = rdev->ops->stop_ap(&rdev->wiphy, dev);
+ trace_rdev_stop_ap(&rdev->wiphy, dev, link_id);
+ ret = rdev->ops->stop_ap(&rdev->wiphy, dev, link_id);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -651,12 +656,14 @@ static inline int rdev_testmode_dump(struct cfg80211_registered_device *rdev,
static inline int
rdev_set_bitrate_mask(struct cfg80211_registered_device *rdev,
- struct net_device *dev, const u8 *peer,
+ struct net_device *dev, unsigned int link_id,
+ const u8 *peer,
const struct cfg80211_bitrate_mask *mask)
{
int ret;
- trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
- ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
+ trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, link_id, peer, mask);
+ ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, link_id,
+ peer, mask);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -944,12 +951,13 @@ static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev,
static inline int
rdev_get_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
int ret;
- trace_rdev_get_channel(&rdev->wiphy, wdev);
- ret = rdev->ops->get_channel(&rdev->wiphy, wdev, chandef);
+ trace_rdev_get_channel(&rdev->wiphy, wdev, link_id);
+ ret = rdev->ops->get_channel(&rdev->wiphy, wdev, link_id, chandef);
trace_rdev_return_chandef(&rdev->wiphy, ret, chandef);
return ret;
@@ -1107,12 +1115,14 @@ static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev,
static inline int
rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
- struct net_device *dev, struct cfg80211_chan_def *chandef)
+ struct net_device *dev,
+ unsigned int link_id,
+ struct cfg80211_chan_def *chandef)
{
int ret;
- trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, chandef);
- ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, chandef);
+ trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, link_id, chandef);
+ ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, link_id, chandef);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
@@ -1412,4 +1422,30 @@ rdev_set_radar_background(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_add_intf_link(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ int ret = 0;
+
+ trace_rdev_add_intf_link(&rdev->wiphy, wdev, link_id);
+ if (rdev->ops->add_intf_link)
+ ret = rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+
+ return ret;
+}
+
+static inline void
+rdev_del_intf_link(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ trace_rdev_del_intf_link(&rdev->wiphy, wdev, link_id);
+ if (rdev->ops->del_intf_link)
+ rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id);
+ trace_rdev_return_void(&rdev->wiphy);
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 58e83ce642ad..c7383ede794f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,7 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -2370,6 +2370,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
enum nl80211_iftype iftype;
bool ret;
+ int link;
wdev_lock(wdev);
iftype = wdev->iftype;
@@ -2378,62 +2379,83 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
if (!wdev->netdev || !netif_running(wdev->netdev))
goto wdev_inactive_unlock;
- switch (iftype) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_MESH_POINT:
- if (!wdev->beacon_interval)
- goto wdev_inactive_unlock;
- chandef = wdev->chandef;
- break;
- case NL80211_IFTYPE_ADHOC:
- if (!wdev->ssid_len)
- goto wdev_inactive_unlock;
- chandef = wdev->chandef;
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_P2P_CLIENT:
- if (!wdev->current_bss ||
- !wdev->current_bss->pub.channel)
- goto wdev_inactive_unlock;
-
- if (!rdev->ops->get_channel ||
- rdev_get_channel(rdev, wdev, &chandef))
- cfg80211_chandef_create(&chandef,
- wdev->current_bss->pub.channel,
- NL80211_CHAN_NO_HT);
- break;
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_P2P_DEVICE:
- /* no enforcement required */
- break;
- default:
- /* others not implemented for now */
- WARN_ON(1);
- break;
- }
+ for (link = 0; link < ARRAY_SIZE(wdev->links); link++) {
+ struct ieee80211_channel *chan;
- wdev_unlock(wdev);
+ if (!wdev->valid_links && link > 0)
+ break;
+ if (wdev->valid_links && !(wdev->valid_links & BIT(link)))
+ continue;
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (!wdev->links[link].ap.beacon_interval)
+ continue;
+ chandef = wdev->links[link].ap.chandef;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!wdev->u.mesh.beacon_interval)
+ continue;
+ chandef = wdev->u.mesh.chandef;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (!wdev->u.ibss.ssid_len)
+ continue;
+ chandef = wdev->u.ibss.chandef;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ /* Maybe we could consider disabling that link only? */
+ if (!wdev->links[link].client.current_bss)
+ continue;
- switch (iftype) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
- wiphy_lock(wiphy);
- ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
- wiphy_unlock(wiphy);
+ chan = wdev->links[link].client.current_bss->pub.channel;
+ if (!chan)
+ continue;
- return ret;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_P2P_CLIENT:
- return cfg80211_chandef_usable(wiphy, &chandef,
- IEEE80211_CHAN_DISABLED);
- default:
- break;
+ if (!rdev->ops->get_channel ||
+ rdev_get_channel(rdev, wdev, link, &chandef))
+ cfg80211_chandef_create(&chandef, chan,
+ NL80211_CHAN_NO_HT);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* no enforcement required */
+ break;
+ default:
+ /* others not implemented for now */
+ WARN_ON(1);
+ break;
+ }
+
+ wdev_unlock(wdev);
+
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ wiphy_lock(wiphy);
+ ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef,
+ iftype);
+ wiphy_unlock(wiphy);
+
+ if (!ret)
+ return ret;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ ret = cfg80211_chandef_usable(wiphy, &chandef,
+ IEEE80211_CHAN_DISABLED);
+ if (!ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ wdev_lock(wdev);
}
+ wdev_unlock(wdev);
+
return true;
wdev_inactive_unlock:
@@ -4215,8 +4237,17 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
* In both cases we should end the CAC on the wdev.
*/
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
- if (wdev->cac_started &&
- !cfg80211_chandef_dfs_usable(&rdev->wiphy, &wdev->chandef))
+ struct cfg80211_chan_def *chandef;
+
+ if (!wdev->cac_started)
+ continue;
+
+ /* FIXME: radar detection is tied to link 0 for now */
+ chandef = wdev_chandef(wdev, 0);
+ if (!chandef)
+ continue;
+
+ if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
rdev_end_cac(rdev, wdev->netdev);
}
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 6d82bd9eaf8c..0134e5d5c81a 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -2617,7 +2617,8 @@ void cfg80211_bss_iter(struct wiphy *wiphy,
spin_lock_bh(&rdev->bss_lock);
list_for_each_entry(bss, &rdev->bss_list, list) {
- if (!chandef || cfg80211_is_sub_chan(chandef, bss->pub.channel))
+ if (!chandef || cfg80211_is_sub_chan(chandef, bss->pub.channel,
+ false))
iter(wiphy, &bss->pub, iter_data);
}
@@ -2626,11 +2627,12 @@ void cfg80211_bss_iter(struct wiphy *wiphy,
EXPORT_SYMBOL(cfg80211_bss_iter);
void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ unsigned int link_id,
struct ieee80211_channel *chan)
{
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct cfg80211_internal_bss *cbss = wdev->current_bss;
+ struct cfg80211_internal_bss *cbss = wdev->links[link_id].client.current_bss;
struct cfg80211_internal_bss *new = NULL;
struct cfg80211_internal_bss *bss;
struct cfg80211_bss *nontrans_bss;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 607a68911047..22996d63c15f 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -5,7 +5,7 @@
* (for nl80211's connect() and wext)
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2009, 2020 Intel Corporation. All rights reserved.
+ * Copyright (C) 2009, 2020, 2022 Intel Corporation. All rights reserved.
* Copyright 2017 Intel Deutschland GmbH
*/
@@ -147,6 +147,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev,
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_connect_params *params;
+ struct cfg80211_auth_request auth_req = {};
struct cfg80211_assoc_request req = {};
int err;
@@ -167,13 +168,19 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev,
if (WARN_ON(!rdev->ops->auth))
return -EOPNOTSUPP;
wdev->conn->state = CFG80211_CONN_AUTHENTICATING;
- return cfg80211_mlme_auth(rdev, wdev->netdev,
- params->channel, params->auth_type,
- params->bssid,
- params->ssid, params->ssid_len,
- NULL, 0,
- params->key, params->key_len,
- params->key_idx, NULL, 0);
+ auth_req.key = params->key;
+ auth_req.key_len = params->key_len;
+ auth_req.key_idx = params->key_idx;
+ auth_req.auth_type = params->auth_type;
+ auth_req.bss = cfg80211_get_bss(&rdev->wiphy, params->channel,
+ params->bssid,
+ params->ssid, params->ssid_len,
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
+ auth_req.link_id = -1;
+ err = cfg80211_mlme_auth(rdev, wdev->netdev, &auth_req);
+ cfg80211_put_bss(&rdev->wiphy, auth_req.bss);
+ return err;
case CFG80211_CONN_AUTH_FAILED_TIMEOUT:
*treason = NL80211_TIMEOUT_AUTH;
return -ENOTCONN;
@@ -192,10 +199,20 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev,
req.ht_capa_mask = params->ht_capa_mask;
req.vht_capa = params->vht_capa;
req.vht_capa_mask = params->vht_capa_mask;
+ req.link_id = -1;
+
+ req.bss = cfg80211_get_bss(&rdev->wiphy, params->channel,
+ params->bssid,
+ params->ssid, params->ssid_len,
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
+ if (!req.bss) {
+ err = -ENOENT;
+ } else {
+ err = cfg80211_mlme_assoc(rdev, wdev->netdev, &req);
+ cfg80211_put_bss(&rdev->wiphy, req.bss);
+ }
- err = cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel,
- params->bssid, params->ssid,
- params->ssid_len, &req);
if (err)
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
@@ -258,7 +275,7 @@ void cfg80211_conn_work(struct work_struct *work)
memset(&cr, 0, sizeof(cr));
cr.status = -1;
- cr.bssid = bssid;
+ cr.links[0].bssid = bssid;
cr.timeout_reason = treason;
__cfg80211_connect_result(wdev->netdev, &cr, false);
}
@@ -367,7 +384,7 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
memset(&cr, 0, sizeof(cr));
cr.status = status_code;
- cr.bssid = mgmt->bssid;
+ cr.links[0].bssid = mgmt->bssid;
cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
__cfg80211_connect_result(wdev->netdev, &cr, false);
} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
@@ -454,6 +471,20 @@ void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev)
schedule_work(&rdev->conn_work);
}
+static void cfg80211_wdev_release_bsses(struct wireless_dev *wdev)
+{
+ unsigned int link;
+
+ for_each_valid_link(wdev, link) {
+ if (!wdev->links[link].client.current_bss)
+ continue;
+ cfg80211_unhold_bss(wdev->links[link].client.current_bss);
+ cfg80211_put_bss(wdev->wiphy,
+ &wdev->links[link].client.current_bss->pub);
+ wdev->links[link].client.current_bss = NULL;
+ }
+}
+
static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev,
const u8 *ies, size_t ies_len,
const u8 **out_ies, size_t *out_ies_len)
@@ -521,12 +552,11 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
if (!rdev->ops->auth || !rdev->ops->assoc)
return -EOPNOTSUPP;
- if (wdev->current_bss) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
- wdev->current_bss = NULL;
+ cfg80211_wdev_release_bsses(wdev);
+ if (wdev->connected) {
cfg80211_sme_free(wdev);
+ wdev->connected = false;
}
if (wdev->conn)
@@ -563,8 +593,8 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
wdev->conn->auto_auth = false;
}
- wdev->conn->params.ssid = wdev->ssid;
- wdev->conn->params.ssid_len = wdev->ssid_len;
+ wdev->conn->params.ssid = wdev->u.client.ssid;
+ wdev->conn->params.ssid_len = wdev->u.client.ssid_len;
/* see if we have the bss already */
bss = cfg80211_get_conn_bss(wdev);
@@ -648,7 +678,7 @@ static bool cfg80211_is_all_idle(void)
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
wdev_lock(wdev);
- if (wdev->conn || wdev->current_bss ||
+ if (wdev->conn || wdev->connected ||
cfg80211_beaconing_iface_active(wdev))
is_all_idle = false;
wdev_unlock(wdev);
@@ -668,6 +698,19 @@ static void disconnect_work(struct work_struct *work)
DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
+static void
+cfg80211_connect_result_release_bsses(struct wireless_dev *wdev,
+ struct cfg80211_connect_resp_params *cr)
+{
+ unsigned int link;
+
+ for_each_valid_link(cr, link) {
+ if (!cr->links[link].bss)
+ continue;
+ cfg80211_unhold_bss(bss_from_pub(cr->links[link].bss));
+ cfg80211_put_bss(wdev->wiphy, cr->links[link].bss);
+ }
+}
/*
* API calls for drivers implementing connect/disconnect and
@@ -686,21 +729,33 @@ void __cfg80211_connect_result(struct net_device *dev,
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
+ unsigned int link;
+ const u8 *connected_addr;
+ bool bss_not_found = false;
ASSERT_WDEV_LOCK(wdev);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
- wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) {
- cfg80211_put_bss(wdev->wiphy, cr->bss);
- return;
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
+ goto out;
+
+ if (cr->valid_links) {
+ if (WARN_ON(!cr->ap_mld_addr))
+ goto out;
+
+ for_each_valid_link(cr, link) {
+ if (WARN_ON(!cr->links[link].addr))
+ goto out;
+ }
}
wdev->unprot_beacon_reported = 0;
nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr,
GFP_KERNEL);
+ connected_addr = cr->valid_links ? cr->ap_mld_addr : cr->links[0].bssid;
#ifdef CONFIG_CFG80211_WEXT
- if (wextev) {
+ if (wextev && !cr->valid_links) {
if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) {
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = cr->req_ie_len;
@@ -717,54 +772,81 @@ void __cfg80211_connect_result(struct net_device *dev,
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) {
- memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN);
- memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN);
+ if (connected_addr && cr->status == WLAN_STATUS_SUCCESS) {
+ memcpy(wrqu.ap_addr.sa_data, connected_addr, ETH_ALEN);
+ memcpy(wdev->wext.prev_bssid, connected_addr, ETH_ALEN);
wdev->wext.prev_bssid_valid = true;
}
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}
#endif
- if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) {
- WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
- cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid,
- wdev->ssid, wdev->ssid_len,
- wdev->conn_bss_type,
- IEEE80211_PRIVACY_ANY);
- if (cr->bss)
- cfg80211_hold_bss(bss_from_pub(cr->bss));
- }
+ if (cr->status == WLAN_STATUS_SUCCESS) {
+ for_each_valid_link(cr, link) {
+ if (WARN_ON_ONCE(!cr->links[link].bss))
+ break;
+ }
- if (wdev->current_bss) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
- wdev->current_bss = NULL;
+ for_each_valid_link(cr, link) {
+ if (cr->links[link].bss)
+ continue;
+
+ cr->links[link].bss =
+ cfg80211_get_bss(wdev->wiphy, NULL,
+ cr->links[link].bssid,
+ wdev->u.client.ssid,
+ wdev->u.client.ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY_ANY);
+ if (!cr->links[link].bss) {
+ bss_not_found = true;
+ break;
+ }
+ cfg80211_hold_bss(bss_from_pub(cr->links[link].bss));
+ }
}
+ cfg80211_wdev_release_bsses(wdev);
+
if (cr->status != WLAN_STATUS_SUCCESS) {
kfree_sensitive(wdev->connect_keys);
wdev->connect_keys = NULL;
- wdev->ssid_len = 0;
+ wdev->u.client.ssid_len = 0;
wdev->conn_owner_nlportid = 0;
- if (cr->bss) {
- cfg80211_unhold_bss(bss_from_pub(cr->bss));
- cfg80211_put_bss(wdev->wiphy, cr->bss);
- }
+ cfg80211_connect_result_release_bsses(wdev, cr);
cfg80211_sme_free(wdev);
return;
}
- if (WARN_ON(!cr->bss))
+ if (WARN_ON(bss_not_found)) {
+ cfg80211_connect_result_release_bsses(wdev, cr);
return;
+ }
- wdev->current_bss = bss_from_pub(cr->bss);
+ memset(wdev->links, 0, sizeof(wdev->links));
+ wdev->valid_links = cr->valid_links;
+ for_each_valid_link(cr, link)
+ wdev->links[link].client.current_bss =
+ bss_from_pub(cr->links[link].bss);
+ wdev->connected = true;
+ ether_addr_copy(wdev->u.client.connected_addr, connected_addr);
+ if (cr->valid_links) {
+ for_each_valid_link(cr, link)
+ memcpy(wdev->links[link].addr, cr->links[link].addr,
+ ETH_ALEN);
+ }
if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
cfg80211_upload_connect_keys(wdev);
rcu_read_lock();
- country_elem = ieee80211_bss_get_elem(cr->bss, WLAN_EID_COUNTRY);
+ for_each_valid_link(cr, link) {
+ country_elem =
+ ieee80211_bss_get_elem(cr->links[link].bss,
+ WLAN_EID_COUNTRY);
+ if (country_elem)
+ break;
+ }
if (!country_elem) {
rcu_read_unlock();
return;
@@ -777,12 +859,60 @@ void __cfg80211_connect_result(struct net_device *dev,
if (!country_data)
return;
- regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
+ regulatory_hint_country_ie(wdev->wiphy,
+ cr->links[link].bss->channel->band,
country_data, country_datalen);
kfree(country_data);
+
+ return;
+out:
+ for_each_valid_link(cr, link)
+ cfg80211_put_bss(wdev->wiphy, cr->links[link].bss);
}
-/* Consumes bss object one way or another */
+static void cfg80211_update_link_bss(struct wireless_dev *wdev,
+ struct cfg80211_bss **bss)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_internal_bss *ibss;
+
+ if (!*bss)
+ return;
+
+ ibss = bss_from_pub(*bss);
+ if (list_empty(&ibss->list)) {
+ struct cfg80211_bss *found = NULL, *tmp = *bss;
+
+ found = cfg80211_get_bss(wdev->wiphy, NULL,
+ (*bss)->bssid,
+ wdev->u.client.ssid,
+ wdev->u.client.ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY_ANY);
+ if (found) {
+ /* The same BSS is already updated so use it
+ * instead, as it has latest info.
+ */
+ *bss = found;
+ } else {
+ /* Update with BSS provided by driver, it will
+ * be freshly added and ref cnted, we can free
+ * the old one.
+ *
+ * signal_valid can be false, as we are not
+ * expecting the BSS to be found.
+ *
+ * keep the old timestamp to avoid confusion
+ */
+ cfg80211_bss_update(rdev, ibss, false,
+ ibss->ts);
+ }
+
+ cfg80211_put_bss(wdev->wiphy, tmp);
+ }
+}
+
+/* Consumes bss object(s) one way or another */
void cfg80211_connect_done(struct net_device *dev,
struct cfg80211_connect_resp_params *params,
gfp_t gfp)
@@ -792,55 +922,34 @@ void cfg80211_connect_done(struct net_device *dev,
struct cfg80211_event *ev;
unsigned long flags;
u8 *next;
+ size_t link_info_size = 0;
+ unsigned int link;
- if (params->bss) {
- struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss);
-
- if (list_empty(&ibss->list)) {
- struct cfg80211_bss *found = NULL, *tmp = params->bss;
-
- found = cfg80211_get_bss(wdev->wiphy, NULL,
- params->bss->bssid,
- wdev->ssid, wdev->ssid_len,
- wdev->conn_bss_type,
- IEEE80211_PRIVACY_ANY);
- if (found) {
- /* The same BSS is already updated so use it
- * instead, as it has latest info.
- */
- params->bss = found;
- } else {
- /* Update with BSS provided by driver, it will
- * be freshly added and ref cnted, we can free
- * the old one.
- *
- * signal_valid can be false, as we are not
- * expecting the BSS to be found.
- *
- * keep the old timestamp to avoid confusion
- */
- cfg80211_bss_update(rdev, ibss, false,
- ibss->ts);
- }
-
- cfg80211_put_bss(wdev->wiphy, tmp);
- }
+ for_each_valid_link(params, link) {
+ cfg80211_update_link_bss(wdev, &params->links[link].bss);
+ link_info_size += params->links[link].bssid ? ETH_ALEN : 0;
+ link_info_size += params->links[link].addr ? ETH_ALEN : 0;
}
- ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) +
+ ev = kzalloc(sizeof(*ev) + (params->ap_mld_addr ? ETH_ALEN : 0) +
params->req_ie_len + params->resp_ie_len +
params->fils.kek_len + params->fils.pmk_len +
- (params->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
+ (params->fils.pmkid ? WLAN_PMKID_LEN : 0) + link_info_size,
+ gfp);
+
if (!ev) {
- cfg80211_put_bss(wdev->wiphy, params->bss);
+ for_each_valid_link(params, link)
+ cfg80211_put_bss(wdev->wiphy,
+ params->links[link].bss);
return;
}
ev->type = EVENT_CONNECT_RESULT;
next = ((u8 *)ev) + sizeof(*ev);
- if (params->bssid) {
- ev->cr.bssid = next;
- memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN);
+ if (params->ap_mld_addr) {
+ ev->cr.ap_mld_addr = next;
+ memcpy((void *)ev->cr.ap_mld_addr, params->ap_mld_addr,
+ ETH_ALEN);
next += ETH_ALEN;
}
if (params->req_ie_len) {
@@ -880,9 +989,28 @@ void cfg80211_connect_done(struct net_device *dev,
ev->cr.fils.update_erp_next_seq_num = params->fils.update_erp_next_seq_num;
if (params->fils.update_erp_next_seq_num)
ev->cr.fils.erp_next_seq_num = params->fils.erp_next_seq_num;
- if (params->bss)
- cfg80211_hold_bss(bss_from_pub(params->bss));
- ev->cr.bss = params->bss;
+ ev->cr.valid_links = params->valid_links;
+ for_each_valid_link(params, link) {
+ if (params->links[link].bss)
+ cfg80211_hold_bss(
+ bss_from_pub(params->links[link].bss));
+ ev->cr.links[link].bss = params->links[link].bss;
+
+ if (params->links[link].addr) {
+ ev->cr.links[link].addr = next;
+ memcpy((void *)ev->cr.links[link].addr,
+ params->links[link].addr,
+ ETH_ALEN);
+ next += ETH_ALEN;
+ }
+ if (params->links[link].bssid) {
+ ev->cr.links[link].bssid = next;
+ memcpy((void *)ev->cr.links[link].bssid,
+ params->links[link].bssid,
+ ETH_ALEN);
+ next += ETH_ALEN;
+ }
+ }
ev->cr.status = params->status;
ev->cr.timeout_reason = params->timeout_reason;
@@ -900,58 +1028,88 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
+ unsigned int link;
+ const u8 *connected_addr;
+
ASSERT_WDEV_LOCK(wdev);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
goto out;
- if (WARN_ON(!wdev->current_bss))
+ if (WARN_ON(!wdev->connected))
goto out;
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
- wdev->current_bss = NULL;
+ if (info->valid_links) {
+ if (WARN_ON(!info->ap_mld_addr))
+ goto out;
- if (WARN_ON(!info->bss))
- return;
+ for_each_valid_link(info, link) {
+ if (WARN_ON(!info->links[link].addr))
+ goto out;
+ }
+ }
- cfg80211_hold_bss(bss_from_pub(info->bss));
- wdev->current_bss = bss_from_pub(info->bss);
+ cfg80211_wdev_release_bsses(wdev);
+ for_each_valid_link(info, link) {
+ if (WARN_ON(!info->links[link].bss))
+ goto out;
+ }
+
+ memset(wdev->links, 0, sizeof(wdev->links));
+ wdev->valid_links = info->valid_links;
+ for_each_valid_link(info, link) {
+ cfg80211_hold_bss(bss_from_pub(info->links[link].bss));
+ wdev->links[link].client.current_bss =
+ bss_from_pub(info->links[link].bss);
+ }
+
+ connected_addr = info->valid_links ?
+ info->ap_mld_addr :
+ info->links[0].bss->bssid;
+ ether_addr_copy(wdev->u.client.connected_addr, connected_addr);
+ if (info->valid_links) {
+ for_each_valid_link(info, link)
+ memcpy(wdev->links[link].addr, info->links[link].addr,
+ ETH_ALEN);
+ }
wdev->unprot_beacon_reported = 0;
nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy),
wdev->netdev, info, GFP_KERNEL);
#ifdef CONFIG_CFG80211_WEXT
- if (info->req_ie) {
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = info->req_ie_len;
- wireless_send_event(wdev->netdev, IWEVASSOCREQIE,
- &wrqu, info->req_ie);
- }
+ if (!info->valid_links) {
+ if (info->req_ie) {
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = info->req_ie_len;
+ wireless_send_event(wdev->netdev, IWEVASSOCREQIE,
+ &wrqu, info->req_ie);
+ }
+
+ if (info->resp_ie) {
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = info->resp_ie_len;
+ wireless_send_event(wdev->netdev, IWEVASSOCRESPIE,
+ &wrqu, info->resp_ie);
+ }
- if (info->resp_ie) {
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = info->resp_ie_len;
- wireless_send_event(wdev->netdev, IWEVASSOCRESPIE,
- &wrqu, info->resp_ie);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(wrqu.ap_addr.sa_data, connected_addr, ETH_ALEN);
+ memcpy(wdev->wext.prev_bssid, connected_addr, ETH_ALEN);
+ wdev->wext.prev_bssid_valid = true;
+ wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
}
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(wrqu.ap_addr.sa_data, info->bss->bssid, ETH_ALEN);
- memcpy(wdev->wext.prev_bssid, info->bss->bssid, ETH_ALEN);
- wdev->wext.prev_bssid_valid = true;
- wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
#endif
return;
out:
- cfg80211_put_bss(wdev->wiphy, info->bss);
+ for_each_valid_link(info, link)
+ cfg80211_put_bss(wdev->wiphy, info->links[link].bss);
}
-/* Consumes info->bss object one way or another */
+/* Consumes info->links.bss object(s) one way or another */
void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
gfp_t gfp)
{
@@ -960,25 +1118,41 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
struct cfg80211_event *ev;
unsigned long flags;
u8 *next;
+ unsigned int link;
+ size_t link_info_size = 0;
+ bool bss_not_found = false;
- if (!info->bss) {
- info->bss = cfg80211_get_bss(wdev->wiphy, info->channel,
- info->bssid, wdev->ssid,
- wdev->ssid_len,
- wdev->conn_bss_type,
- IEEE80211_PRIVACY_ANY);
+ for_each_valid_link(info, link) {
+ link_info_size += info->links[link].addr ? ETH_ALEN : 0;
+ link_info_size += info->links[link].bssid ? ETH_ALEN : 0;
+
+ if (info->links[link].bss)
+ continue;
+
+ info->links[link].bss =
+ cfg80211_get_bss(wdev->wiphy,
+ info->links[link].channel,
+ info->links[link].bssid,
+ wdev->u.client.ssid,
+ wdev->u.client.ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY_ANY);
+
+ if (!info->links[link].bss) {
+ bss_not_found = true;
+ break;
+ }
}
- if (WARN_ON(!info->bss))
- return;
+ if (WARN_ON(bss_not_found))
+ goto out;
ev = kzalloc(sizeof(*ev) + info->req_ie_len + info->resp_ie_len +
info->fils.kek_len + info->fils.pmk_len +
- (info->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
- if (!ev) {
- cfg80211_put_bss(wdev->wiphy, info->bss);
- return;
- }
+ (info->fils.pmkid ? WLAN_PMKID_LEN : 0) +
+ (info->ap_mld_addr ? ETH_ALEN : 0) + link_info_size, gfp);
+ if (!ev)
+ goto out;
ev->type = EVENT_ROAMED;
next = ((u8 *)ev) + sizeof(*ev);
@@ -1018,12 +1192,43 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
ev->rm.fils.update_erp_next_seq_num = info->fils.update_erp_next_seq_num;
if (info->fils.update_erp_next_seq_num)
ev->rm.fils.erp_next_seq_num = info->fils.erp_next_seq_num;
- ev->rm.bss = info->bss;
+ if (info->ap_mld_addr) {
+ ev->rm.ap_mld_addr = next;
+ memcpy((void *)ev->rm.ap_mld_addr, info->ap_mld_addr,
+ ETH_ALEN);
+ next += ETH_ALEN;
+ }
+ ev->rm.valid_links = info->valid_links;
+ for_each_valid_link(info, link) {
+ ev->rm.links[link].bss = info->links[link].bss;
+
+ if (info->links[link].addr) {
+ ev->rm.links[link].addr = next;
+ memcpy((void *)ev->rm.links[link].addr,
+ info->links[link].addr,
+ ETH_ALEN);
+ next += ETH_ALEN;
+ }
+
+ if (info->links[link].bssid) {
+ ev->rm.links[link].bssid = next;
+ memcpy((void *)ev->rm.links[link].bssid,
+ info->links[link].bssid,
+ ETH_ALEN);
+ next += ETH_ALEN;
+ }
+ }
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
spin_unlock_irqrestore(&wdev->event_lock, flags);
queue_work(cfg80211_wq, &rdev->event_work);
+
+ return;
+out:
+ for_each_valid_link(info, link)
+ cfg80211_put_bss(wdev->wiphy, info->links[link].bss);
+
}
EXPORT_SYMBOL(cfg80211_roamed);
@@ -1035,8 +1240,8 @@ void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid)
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
return;
- if (WARN_ON(!wdev->current_bss) ||
- WARN_ON(!ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+ if (WARN_ON(!wdev->connected) ||
+ WARN_ON(!ether_addr_equal(wdev->u.client.connected_addr, bssid)))
return;
nl80211_send_port_authorized(wiphy_to_rdev(wdev->wiphy), wdev->netdev,
@@ -1088,13 +1293,9 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
return;
- if (wdev->current_bss) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
- }
-
- wdev->current_bss = NULL;
- wdev->ssid_len = 0;
+ cfg80211_wdev_release_bsses(wdev);
+ wdev->connected = false;
+ wdev->u.client.ssid_len = 0;
wdev->conn_owner_nlportid = 0;
kfree_sensitive(wdev->connect_keys);
wdev->connect_keys = NULL;
@@ -1183,19 +1384,20 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
* already connected, so reject a new SSID unless it's the
* same (which is the case for re-association.)
*/
- if (wdev->ssid_len &&
- (wdev->ssid_len != connect->ssid_len ||
- memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+ if (wdev->u.client.ssid_len &&
+ (wdev->u.client.ssid_len != connect->ssid_len ||
+ memcmp(wdev->u.client.ssid, connect->ssid, wdev->u.client.ssid_len)))
return -EALREADY;
/*
* If connected, reject (re-)association unless prev_bssid
* matches the current BSSID.
*/
- if (wdev->current_bss) {
+ if (wdev->connected) {
if (!prev_bssid)
return -EALREADY;
- if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+ if (!ether_addr_equal(prev_bssid,
+ wdev->u.client.connected_addr))
return -ENOTCONN;
}
@@ -1246,8 +1448,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
}
wdev->connect_keys = connkeys;
- memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
- wdev->ssid_len = connect->ssid_len;
+ memcpy(wdev->u.client.ssid, connect->ssid, connect->ssid_len);
+ wdev->u.client.ssid_len = connect->ssid_len;
wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS :
IEEE80211_BSS_TYPE_ESS;
@@ -1263,8 +1465,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
* This could be reassoc getting refused, don't clear
* ssid_len in that case.
*/
- if (!wdev->current_bss)
- wdev->ssid_len = 0;
+ if (!wdev->connected)
+ wdev->u.client.ssid_len = 0;
return err;
}
@@ -1288,7 +1490,7 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
err = cfg80211_sme_disconnect(wdev, reason);
else if (!rdev->ops->disconnect)
cfg80211_mlme_down(rdev, dev);
- else if (wdev->ssid_len)
+ else if (wdev->u.client.ssid_len)
err = rdev_disconnect(rdev, dev, reason);
/*
@@ -1296,8 +1498,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
* in which case cfg80211_disconnected() will take care of
* this later.
*/
- if (!wdev->current_bss)
- wdev->ssid_len = 0;
+ if (!wdev->connected)
+ wdev->u.client.ssid_len = 0;
return err;
}
@@ -1321,7 +1523,7 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- __cfg80211_stop_ap(rdev, wdev->netdev, false);
+ __cfg80211_stop_ap(rdev, wdev->netdev, -1, false);
break;
case NL80211_IFTYPE_MESH_POINT:
__cfg80211_leave_mesh(rdev, wdev->netdev);
@@ -1333,7 +1535,7 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
* ops->disconnect not implemented. Otherwise we can
* use cfg80211_disconnect.
*/
- if (rdev->ops->disconnect || wdev->current_bss)
+ if (rdev->ops->disconnect || wdev->connected)
cfg80211_disconnect(rdev, wdev->netdev,
WLAN_REASON_DEAUTH_LEAVING,
true);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 228079d7690a..65f8b814ecd0 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -569,6 +569,7 @@ TRACE_EVENT(rdev_start_ap,
__field(bool, privacy)
__field(enum nl80211_auth_type, auth_type)
__field(int, inactivity_timeout)
+ __field(unsigned int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -583,16 +584,17 @@ TRACE_EVENT(rdev_start_ap,
__entry->inactivity_timeout = settings->inactivity_timeout;
memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
memcpy(__entry->ssid, settings->ssid, settings->ssid_len);
+ __entry->link_id = settings->beacon.link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", AP settings - ssid: %s, "
CHAN_DEF_PR_FMT ", beacon interval: %d, dtim period: %d, "
"hidden ssid: %d, wpa versions: %u, privacy: %s, "
- "auth type: %d, inactivity timeout: %d",
+ "auth type: %d, inactivity timeout: %d, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ssid, CHAN_DEF_PR_ARG,
__entry->beacon_interval, __entry->dtim_period,
__entry->hidden_ssid, __entry->wpa_ver,
BOOL_TO_STR(__entry->privacy), __entry->auth_type,
- __entry->inactivity_timeout)
+ __entry->inactivity_timeout, __entry->link_id)
);
TRACE_EVENT(rdev_change_beacon,
@@ -602,6 +604,7 @@ TRACE_EVENT(rdev_change_beacon,
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
+ __field(int, link_id)
__dynamic_array(u8, head, info ? info->head_len : 0)
__dynamic_array(u8, tail, info ? info->tail_len : 0)
__dynamic_array(u8, beacon_ies, info ? info->beacon_ies_len : 0)
@@ -615,6 +618,7 @@ TRACE_EVENT(rdev_change_beacon,
WIPHY_ASSIGN;
NETDEV_ASSIGN;
if (info) {
+ __entry->link_id = info->link_id;
if (info->head)
memcpy(__get_dynamic_array(head), info->head,
info->head_len);
@@ -635,9 +639,30 @@ TRACE_EVENT(rdev_change_beacon,
if (info->probe_resp)
memcpy(__get_dynamic_array(probe_resp),
info->probe_resp, info->probe_resp_len);
+ } else {
+ __entry->link_id = -1;
}
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id:%d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id)
+);
+
+TRACE_EVENT(rdev_stop_ap,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, netdev, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id)
);
DECLARE_EVENT_CLASS(wiphy_netdev_evt,
@@ -654,11 +679,6 @@ DECLARE_EVENT_CLASS(wiphy_netdev_evt,
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG)
);
-DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
- TP_ARGS(wiphy, netdev)
-);
-
DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
TP_ARGS(wiphy, netdev)
@@ -1619,20 +1639,24 @@ TRACE_EVENT(rdev_testmode_dump,
TRACE_EVENT(rdev_set_bitrate_mask,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ unsigned int link_id,
const u8 *peer, const struct cfg80211_bitrate_mask *mask),
- TP_ARGS(wiphy, netdev, peer, mask),
+ TP_ARGS(wiphy, netdev, link_id, peer, mask),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
+ __field(unsigned int, link_id)
MAC_ENTRY(peer)
),
TP_fast_assign(
WIPHY_ASSIGN;
NETDEV_ASSIGN;
+ __entry->link_id = link_id;
MAC_ASSIGN(peer, peer);
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, peer: " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id,
+ MAC_PR_ARG(peer))
);
TRACE_EVENT(rdev_update_mgmt_frame_registrations,
@@ -2040,9 +2064,28 @@ TRACE_EVENT(rdev_set_noack_map,
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map)
);
-DEFINE_EVENT(wiphy_wdev_evt, rdev_get_channel,
- TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
- TP_ARGS(wiphy, wdev)
+DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, wdev, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u",
+ WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id)
+);
+
+DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_channel,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, wdev, link_id)
);
TRACE_EVENT(rdev_return_chandef,
@@ -2296,20 +2339,24 @@ TRACE_EVENT(rdev_set_qos_map,
TRACE_EVENT(rdev_set_ap_chanwidth,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef),
- TP_ARGS(wiphy, netdev, chandef),
+ TP_ARGS(wiphy, netdev, link_id, chandef),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
CHAN_DEF_ENTRY
+ __field(unsigned int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
+ __entry->link_id = link_id;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+ __entry->link_id)
);
TRACE_EVENT(rdev_add_tx_ts,
@@ -2645,6 +2692,155 @@ TRACE_EVENT(rdev_set_fils_aad,
__entry->kek_len)
);
+TRACE_EVENT(rdev_update_owe_info,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info),
+ TP_ARGS(wiphy, netdev, owe_info),
+ TP_STRUCT__entry(WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __field(u16, status)
+ __dynamic_array(u8, ie, owe_info->ie_len)),
+ TP_fast_assign(WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, owe_info->peer);
+ __entry->status = owe_info->status;
+ memcpy(__get_dynamic_array(ie),
+ owe_info->ie, owe_info->ie_len);),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT
+ " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+ __entry->status)
+);
+
+TRACE_EVENT(rdev_probe_mesh_link,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const u8 *dest, const u8 *buf, size_t len),
+ TP_ARGS(wiphy, netdev, dest, buf, len),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(dest)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(dest, dest);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dest))
+);
+
+TRACE_EVENT(rdev_set_tid_config,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_tid_config *tid_conf),
+ TP_ARGS(wiphy, netdev, tid_conf),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, tid_conf->peer);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+);
+
+TRACE_EVENT(rdev_reset_tid_config,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const u8 *peer, u8 tids),
+ TP_ARGS(wiphy, netdev, peer, tids),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __field(u8, tids)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, peer);
+ __entry->tids = tids;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", tids: 0x%x",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tids)
+);
+
+TRACE_EVENT(rdev_set_sar_specs,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_sar_specs *sar),
+ TP_ARGS(wiphy, sar),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(u16, type)
+ __field(u16, num)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->type = sar->type;
+ __entry->num = sar->num_sub_specs;
+
+ ),
+ TP_printk(WIPHY_PR_FMT ", Set type:%d, num_specs:%d",
+ WIPHY_PR_ARG, __entry->type, __entry->num)
+);
+
+TRACE_EVENT(rdev_color_change,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_color_change_settings *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(u8, count)
+ __field(u16, bcn_ofs)
+ __field(u16, pres_ofs)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->count = params->count;
+ __entry->bcn_ofs = params->counter_offset_beacon;
+ __entry->pres_ofs = params->counter_offset_presp;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT
+ ", count: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ __entry->count)
+);
+
+TRACE_EVENT(rdev_set_radar_background,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
+
+ TP_ARGS(wiphy, chandef),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ CHAN_DEF_ENTRY
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ CHAN_DEF_ASSIGN(chandef)
+ ),
+
+ TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
+ WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
+DEFINE_EVENT(wiphy_wdev_link_evt, rdev_add_intf_link,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, wdev, link_id)
+);
+
+DEFINE_EVENT(wiphy_wdev_link_evt, rdev_del_intf_link,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, wdev, link_id)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -3022,18 +3218,21 @@ TRACE_EVENT(cfg80211_chandef_dfs_required,
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
- struct cfg80211_chan_def *chandef),
- TP_ARGS(netdev, chandef),
+ struct cfg80211_chan_def *chandef,
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
+ __field(unsigned int, link_id)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
+ __entry->link_id = link_id;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_ch_switch_started_notify,
@@ -3520,26 +3719,6 @@ TRACE_EVENT(cfg80211_pmsr_complete,
(unsigned long long)__entry->cookie)
);
-TRACE_EVENT(rdev_update_owe_info,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_update_owe_info *owe_info),
- TP_ARGS(wiphy, netdev, owe_info),
- TP_STRUCT__entry(WIPHY_ENTRY
- NETDEV_ENTRY
- MAC_ENTRY(peer)
- __field(u16, status)
- __dynamic_array(u8, ie, owe_info->ie_len)),
- TP_fast_assign(WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- MAC_ASSIGN(peer, owe_info->peer);
- __entry->status = owe_info->status;
- memcpy(__get_dynamic_array(ie),
- owe_info->ie, owe_info->ie_len);),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT
- " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
- __entry->status)
-);
-
TRACE_EVENT(cfg80211_update_owe_info_event,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_update_owe_info *owe_info),
@@ -3557,104 +3736,6 @@ TRACE_EVENT(cfg80211_update_owe_info_event,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
);
-TRACE_EVENT(rdev_probe_mesh_link,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- const u8 *dest, const u8 *buf, size_t len),
- TP_ARGS(wiphy, netdev, dest, buf, len),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- MAC_ENTRY(dest)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- MAC_ASSIGN(dest, dest);
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dest))
-);
-
-TRACE_EVENT(rdev_set_tid_config,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_tid_config *tid_conf),
- TP_ARGS(wiphy, netdev, tid_conf),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- MAC_ENTRY(peer)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- MAC_ASSIGN(peer, tid_conf->peer);
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
-);
-
-TRACE_EVENT(rdev_reset_tid_config,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- const u8 *peer, u8 tids),
- TP_ARGS(wiphy, netdev, peer, tids),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- MAC_ENTRY(peer)
- __field(u8, tids)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- MAC_ASSIGN(peer, peer);
- __entry->tids = tids;
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", tids: 0x%x",
- WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tids)
-);
-
-TRACE_EVENT(rdev_set_sar_specs,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_sar_specs *sar),
- TP_ARGS(wiphy, sar),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- __field(u16, type)
- __field(u16, num)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- __entry->type = sar->type;
- __entry->num = sar->num_sub_specs;
-
- ),
- TP_printk(WIPHY_PR_FMT ", Set type:%d, num_specs:%d",
- WIPHY_PR_ARG, __entry->type, __entry->num)
-);
-
-TRACE_EVENT(rdev_color_change,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_color_change_settings *params),
- TP_ARGS(wiphy, netdev, params),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __field(u8, count)
- __field(u16, bcn_ofs)
- __field(u16, pres_ofs)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- __entry->count = params->count;
- __entry->bcn_ofs = params->counter_offset_beacon;
- __entry->pres_ofs = params->counter_offset_presp;
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT
- ", count: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG,
- __entry->count)
-);
-
TRACE_EVENT(cfg80211_bss_color_notify,
TP_PROTO(struct net_device *netdev,
enum nl80211_commands cmd,
@@ -3677,25 +3758,6 @@ TRACE_EVENT(cfg80211_bss_color_notify,
__entry->color_bitmap)
);
-TRACE_EVENT(rdev_set_radar_background,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
-
- TP_ARGS(wiphy, chandef),
-
- TP_STRUCT__entry(
- WIPHY_ENTRY
- CHAN_DEF_ENTRY
- ),
-
- TP_fast_assign(
- WIPHY_ASSIGN;
- CHAN_DEF_ASSIGN(chandef)
- ),
-
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
- WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
-);
-
TRACE_EVENT(cfg80211_assoc_comeback,
TP_PROTO(struct wireless_dev *wdev, const u8 *bssid, u32 timeout),
TP_ARGS(wdev, bssid, timeout),
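
The trace.h changes above thread a link_id through the AP, bitrate-mask, channel and
interface-link tracepoints. As a minimal sketch of how the reworked rdev_get_channel
tracepoint would be fed -- assuming the usual rdev-ops.h wrapper pattern and that the
driver ->get_channel() op now also takes the link_id; this block is illustrative and
not part of the diff:

	/* Illustrative sketch only -- not part of this patch. */
	static inline int rdev_get_channel(struct cfg80211_registered_device *rdev,
					   struct wireless_dev *wdev,
					   unsigned int link_id,
					   struct cfg80211_chan_def *chandef)
	{
		int ret;

		/* new: the tracepoint records which link is being queried */
		trace_rdev_get_channel(&rdev->wiphy, wdev, link_id);
		ret = rdev->ops->get_channel(&rdev->wiphy, wdev, link_id, chandef);
		if (ret == 0)
			trace_rdev_return_chandef(&rdev->wiphy, chandef);
		else
			trace_rdev_return_int(&rdev->wiphy, ret);
		return ret;
	}
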
diff --git a/net/wireless/util.c b/net/wireless/util.c
index a60d7d638e72..b7257862e0fe 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,7 +5,7 @@
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/export.h>
#include <linux/bitops.h>
@@ -1041,7 +1041,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return -EBUSY;
dev->ieee80211_ptr->use_4addr = false;
- dev->ieee80211_ptr->mesh_id_up_len = 0;
wdev_lock(dev->ieee80211_ptr);
rdev_set_qos_map(rdev, dev, NULL);
wdev_unlock(dev->ieee80211_ptr);
@@ -1049,7 +1048,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
switch (otype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- cfg80211_stop_ap(rdev, dev, true);
+ cfg80211_stop_ap(rdev, dev, -1, true);
break;
case NL80211_IFTYPE_ADHOC:
cfg80211_leave_ibss(rdev, dev, false);
@@ -1073,6 +1072,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
cfg80211_process_rdev_events(rdev);
cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
+
+ memset(&dev->ieee80211_ptr->u, 0,
+ sizeof(dev->ieee80211_ptr->u));
+ memset(&dev->ieee80211_ptr->links, 0,
+ sizeof(dev->ieee80211_ptr->links));
}
err = rdev_change_virtual_intf(rdev, dev, ntype, params);
@@ -1930,6 +1934,24 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
}
EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
+static int cfg80211_wdev_bi(struct wireless_dev *wdev)
+{
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ WARN_ON(wdev->valid_links);
+ return wdev->links[0].ap.beacon_interval;
+ case NL80211_IFTYPE_MESH_POINT:
+ return wdev->u.mesh.beacon_interval;
+ case NL80211_IFTYPE_ADHOC:
+ return wdev->u.ibss.beacon_interval;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
u32 *beacon_int_gcd,
bool *beacon_int_different)
@@ -1940,19 +1962,27 @@ static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
*beacon_int_different = false;
list_for_each_entry(wdev, &wiphy->wdev_list, list) {
- if (!wdev->beacon_interval)
+ int wdev_bi;
+
+ /* this feature isn't supported with MLO */
+ if (wdev->valid_links)
+ continue;
+
+ wdev_bi = cfg80211_wdev_bi(wdev);
+
+ if (!wdev_bi)
continue;
if (!*beacon_int_gcd) {
- *beacon_int_gcd = wdev->beacon_interval;
+ *beacon_int_gcd = wdev_bi;
continue;
}
- if (wdev->beacon_interval == *beacon_int_gcd)
+ if (wdev_bi == *beacon_int_gcd)
continue;
*beacon_int_different = true;
- *beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
+ *beacon_int_gcd = gcd(*beacon_int_gcd, wdev_bi);
}
if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
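
The util.c hunk replaces direct reads of the old wdev->beacon_interval with the
per-iftype cfg80211_wdev_bi() helper and skips MLO interfaces entirely. A standalone
worked example of the GCD accumulation the loop performs, with made-up interval
values (illustrative only, builds in userspace):

	/* Illustrative only: the beacon-interval GCD accumulation done in
	 * cfg80211_calculate_bi_data(), with hypothetical values. */
	#include <stdio.h>

	static unsigned int gcd(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int t = a % b;
			a = b;
			b = t;
		}
		return a;
	}

	int main(void)
	{
		/* e.g. an AP link at 100 TU, a mesh interface at 1000 TU,
		 * and one interface that is not beaconing at all */
		unsigned int intervals[] = { 100, 1000, 0 };
		unsigned int bi_gcd = 0;
		int different = 0;

		for (unsigned int i = 0;
		     i < sizeof(intervals) / sizeof(intervals[0]); i++) {
			unsigned int bi = intervals[i];

			if (!bi)
				continue;	/* mirrors "if (!wdev_bi) continue;" */
			if (!bi_gcd) {
				bi_gcd = bi;	/* first beaconing interface */
				continue;
			}
			if (bi == bi_gcd)
				continue;
			different = 1;
			bi_gcd = gcd(bi_gcd, bi);
		}
		printf("gcd=%u different=%d\n", bi_gcd, different); /* gcd=100 different=1 */
		return 0;
	}
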
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a32065d600a1..a9767bfe7330 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -7,7 +7,7 @@
* we directly assign the wireless handlers of wireless interfaces.
*
* Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/export.h>
@@ -415,6 +415,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
int err, i;
bool rejoin = false;
+ if (wdev->valid_links)
+ return -EINVAL;
+
if (pairwise && !addr)
return -EINVAL;
@@ -437,7 +440,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
return -EOPNOTSUPP;
if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- if (!wdev->current_bss)
+ if (!wdev->connected)
return -ENOLINK;
if (!rdev->ops->set_default_mgmt_key)
@@ -450,7 +453,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if (remove) {
err = 0;
- if (wdev->current_bss) {
+ if (wdev->connected ||
+ (wdev->iftype == NL80211_IFTYPE_ADHOC &&
+ wdev->u.ibss.current_bss)) {
/*
* If removing the current TX key, we will need to
* join a new IBSS without the privacy bit clear.
@@ -501,7 +506,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
return -EINVAL;
err = 0;
- if (wdev->current_bss)
+ if (wdev->connected ||
+ (wdev->iftype == NL80211_IFTYPE_ADHOC &&
+ wdev->u.ibss.current_bss))
err = rdev_add_key(rdev, dev, idx, pairwise, addr, params);
else if (params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
params->cipher != WLAN_CIPHER_SUITE_WEP104)
@@ -526,7 +533,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
params->cipher == WLAN_CIPHER_SUITE_WEP104) &&
(tx_key || (!addr && wdev->wext.default_key == -1))) {
- if (wdev->current_bss) {
+ if (wdev->connected ||
+ (wdev->iftype == NL80211_IFTYPE_ADHOC &&
+ wdev->u.ibss.current_bss)) {
/*
* If we are getting a new TX key from not having
* had one before we need to join a new IBSS with
@@ -549,7 +558,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
(tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
- if (wdev->current_bss)
+ if (wdev->connected ||
+ (wdev->iftype == NL80211_IFTYPE_ADHOC &&
+ wdev->u.ibss.current_bss))
err = rdev_set_default_mgmt_key(rdev, dev, idx);
if (!err)
wdev->wext.default_mgmt_key = idx;
@@ -595,6 +606,11 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
return -EOPNOTSUPP;
wiphy_lock(&rdev->wiphy);
+ if (wdev->valid_links) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
idx = erq->flags & IW_ENCODE_INDEX;
if (idx == 0) {
idx = wdev->wext.default_key;
@@ -613,7 +629,9 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
/* No key data - just set the default TX key index */
err = 0;
wdev_lock(wdev);
- if (wdev->current_bss)
+ if (wdev->connected ||
+ (wdev->iftype == NL80211_IFTYPE_ADHOC &&
+ wdev->u.ibss.current_bss))
err = rdev_set_default_key(rdev, dev, idx, true,
true);
if (!err)
@@ -865,7 +883,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
break;
}
- ret = rdev_get_channel(rdev, wdev, &chandef);
+ ret = rdev_get_channel(rdev, wdev, 0, &chandef);
if (ret)
break;
freq->m = chandef.chan->center_freq;
@@ -1270,7 +1288,10 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
return -EINVAL;
wiphy_lock(&rdev->wiphy);
- ret = rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+ if (dev->ieee80211_ptr->valid_links)
+ ret = -EOPNOTSUPP;
+ else
+ ret = rdev_set_bitrate_mask(rdev, dev, 0, NULL, &mask);
wiphy_unlock(&rdev->wiphy);
return ret;
@@ -1294,8 +1315,9 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
err = 0;
wdev_lock(wdev);
- if (wdev->current_bss)
- memcpy(addr, wdev->current_bss->pub.bssid, ETH_ALEN);
+ if (!wdev->valid_links && wdev->links[0].client.current_bss)
+ memcpy(addr, wdev->links[0].client.current_bss->pub.bssid,
+ ETH_ALEN);
else
err = -EOPNOTSUPP;
wdev_unlock(wdev);
@@ -1339,11 +1361,11 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
/* Grab BSSID of current BSS, if any */
wdev_lock(wdev);
- if (!wdev->current_bss) {
+ if (wdev->valid_links || !wdev->links[0].client.current_bss) {
wdev_unlock(wdev);
return NULL;
}
- memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
+ memcpy(bssid, wdev->links[0].client.current_bss->pub.bssid, ETH_ALEN);
wdev_unlock(wdev);
memset(&sinfo, 0, sizeof(sinfo));
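
The wext-compat.c hunks repeatedly open-code "still associated" as wdev->connected
or, for ad-hoc interfaces, wdev->u.ibss.current_bss. A hypothetical helper, not
present in the patch, that captures the predicate the removed wdev->current_bss
checks are being replaced with:

	/* Hypothetical helper, not part of this diff: the "associated or
	 * IBSS-joined" test now open-coded at each former wdev->current_bss
	 * check in the wext compat paths. */
	static bool wdev_has_connection(struct wireless_dev *wdev)
	{
		return wdev->connected ||
		       (wdev->iftype == NL80211_IFTYPE_ADHOC &&
			wdev->u.ibss.current_bss);
	}
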
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index cd09a9042261..68f45afc352d 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -3,7 +3,7 @@
* cfg80211 wext compat for managed mode.
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2009, 2020-2021 Intel Corporation.
+ * Copyright (C) 2009, 2020-2022 Intel Corporation
*/
#include <linux/export.h>
@@ -124,9 +124,12 @@ int cfg80211_mgd_wext_giwfreq(struct net_device *dev,
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
return -EINVAL;
+ if (wdev->valid_links)
+ return -EOPNOTSUPP;
+
wdev_lock(wdev);
- if (wdev->current_bss)
- chan = wdev->current_bss->pub.channel;
+ if (wdev->links[0].client.current_bss)
+ chan = wdev->links[0].client.current_bss->pub.channel;
else if (wdev->wext.connect.channel)
chan = wdev->wext.connect.channel;
wdev_unlock(wdev);
@@ -208,15 +211,19 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
return -EINVAL;
+ if (wdev->valid_links)
+ return -EINVAL;
+
data->flags = 0;
wdev_lock(wdev);
- if (wdev->current_bss) {
+ if (wdev->links[0].client.current_bss) {
const struct element *ssid_elem;
rcu_read_lock();
- ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub,
- WLAN_EID_SSID);
+ ssid_elem = ieee80211_bss_get_elem(
+ &wdev->links[0].client.current_bss->pub,
+ WLAN_EID_SSID);
if (ssid_elem) {
data->flags = 1;
data->length = ssid_elem->datalen;
@@ -300,8 +307,14 @@ int cfg80211_mgd_wext_giwap(struct net_device *dev,
ap_addr->sa_family = ARPHRD_ETHER;
wdev_lock(wdev);
- if (wdev->current_bss)
- memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
+ if (wdev->valid_links) {
+ wdev_unlock(wdev);
+ return -EOPNOTSUPP;
+ }
+ if (wdev->links[0].client.current_bss)
+ memcpy(ap_addr->sa_data,
+ wdev->links[0].client.current_bss->pub.bssid,
+ ETH_ALEN);
else
eth_zero_addr(ap_addr->sa_data);
wdev_unlock(wdev);
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index f01ef6bda390..869b9b9b9fad 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -57,7 +57,7 @@ static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
static void xdp_umem_release(struct xdp_umem *umem)
{
umem->zc = false;
- ida_simple_remove(&umem_ida, umem->id);
+ ida_free(&umem_ida, umem->id);
xdp_umem_addr_unmap(umem);
xdp_umem_unpin_pages(umem);
@@ -242,7 +242,7 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
if (!umem)
return ERR_PTR(-ENOMEM);
- err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
+ err = ida_alloc(&umem_ida, GFP_KERNEL);
if (err < 0) {
kfree(umem);
return ERR_PTR(err);
@@ -251,7 +251,7 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
err = xdp_umem_reg(umem, mr);
if (err) {
- ida_simple_remove(&umem_ida, umem->id);
+ ida_free(&umem_ida, umem->id);
kfree(umem);
return ERR_PTR(err);
}
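
The xdp_umem.c hunks move from the deprecated ida_simple_get()/ida_simple_remove()
wrappers to ida_alloc()/ida_free(). A minimal sketch of the allocate/check/free
pattern as used here (illustrative, not taken from the patch):

	/* Illustrative only: ida_alloc() returns a non-negative id or a
	 * negative errno; the id is released again with ida_free(). */
	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_assign_id(int *out_id)
	{
		int id = ida_alloc(&example_ida, GFP_KERNEL);

		if (id < 0)
			return id;	/* e.g. -ENOMEM; nothing to free */

		*out_id = id;
		return 0;
	}

	static void example_release_id(int id)
	{
		ida_free(&example_ida, id);
	}
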
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 35c7e89b2e7d..637ca8838436 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -275,7 +275,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
xso->dev = NULL;
xso->dir = 0;
xso->real_dev = NULL;
- dev_put_track(dev, &xso->dev_tracker);
+ netdev_put(dev, &xso->dev_tracker);
if (err != -EOPNOTSUPP)
return err;