Diffstat (limited to 'net')
-rw-r--r--net/bridge/br_fdb.c19
-rw-r--r--net/bridge/br_if.c5
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/br_vlan.c5
-rw-r--r--net/caif/cfrfml.c3
-rw-r--r--net/core/dev.c13
-rw-r--r--net/core/ethtool.c158
-rw-r--r--net/core/filter.c25
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/rtnetlink.c51
-rw-r--r--net/core/sock.c2
-rw-r--r--net/core/xdp.c53
-rw-r--r--net/dsa/Kconfig3
-rw-r--r--net/dsa/Makefile1
-rw-r--r--net/dsa/dsa.c49
-rw-r--r--net/dsa/dsa_priv.h4
-rw-r--r--net/dsa/legacy.c3
-rw-r--r--net/dsa/slave.c28
-rw-r--r--net/dsa/tag_gswip.c109
-rw-r--r--net/ieee802154/6lowpan/reassembly.c2
-rw-r--r--net/ipv4/cipso_ipv4.c11
-rw-r--r--net/ipv4/devinet.c62
-rw-r--r--net/ipv4/fib_semantics.c4
-rw-r--r--net/ipv4/gre_demux.c7
-rw-r--r--net/ipv4/ip_fragment.c23
-rw-r--r--net/ipv4/ip_gre.c9
-rw-r--r--net/ipv4/ip_input.c6
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c13
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv6/addrconf.c126
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/ip6_gre.c26
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c11
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c29
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/mac80211/cfg.c57
-rw-r--r--net/mac80211/debugfs.c4
-rw-r--r--net/mac80211/debugfs_sta.c364
-rw-r--r--net/mac80211/driver-ops.h10
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h10
-rw-r--r--net/mac80211/key.c111
-rw-r--r--net/mac80211/main.c69
-rw-r--r--net/mac80211/mesh.c5
-rw-r--r--net/mac80211/mlme.c23
-rw-r--r--net/mac80211/rx.c43
-rw-r--r--net/mac80211/spectmgmt.c5
-rw-r--r--net/mac80211/sta_info.c27
-rw-r--r--net/mac80211/tx.c75
-rw-r--r--net/mac80211/util.c158
-rw-r--r--net/mac80211/vht.c20
-rw-r--r--net/ncsi/ncsi-netlink.c1
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netlink/af_netlink.c11
-rw-r--r--net/openvswitch/flow.c22
-rw-r--r--net/packet/af_packet.c17
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/recv.c19
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rxrpc/input.c2
-rw-r--r--net/sched/act_api.c6
-rw-r--r--net/sched/act_bpf.c3
-rw-r--r--net/sched/act_connmark.c14
-rw-r--r--net/sched/act_csum.c3
-rw-r--r--net/sched/act_gact.c3
-rw-r--r--net/sched/act_ife.c3
-rw-r--r--net/sched/act_ipt.c6
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/act_nat.c18
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c189
-rw-r--r--net/sched/act_sample.c3
-rw-r--r--net/sched/act_simple.c3
-rw-r--r--net/sched/act_skbedit.c26
-rw-r--r--net/sched/act_skbmod.c3
-rw-r--r--net/sched/act_tunnel_key.c3
-rw-r--r--net/sched/act_vlan.c3
-rw-r--r--net/sched/cls_flower.c5
-rw-r--r--net/sched/sch_api.c7
-rw-r--r--net/sched/sch_cake.c6
-rw-r--r--net/sched/sch_fq.c2
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_generic.c4
-rw-r--r--net/sched/sch_hhf.c2
-rw-r--r--net/sched/sch_htb.c116
-rw-r--r--net/sched/sch_netem.c14
-rw-r--r--net/sched/sch_tbf.c2
-rw-r--r--net/sctp/ulpqueue.c2
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/topsrv.c10
-rw-r--r--net/tls/tls_sw.c230
-rw-r--r--net/wireless/nl80211.c10
-rw-r--r--net/wireless/reg.c112
-rw-r--r--net/wireless/trace.h2
-rw-r--r--net/wireless/util.c160
-rw-r--r--net/xdp/xdp_umem.c6
-rw-r--r--net/xdp/xdp_umem.h10
-rw-r--r--net/xdp/xdp_umem_props.h14
-rw-r--r--net/xdp/xsk.c34
-rw-r--r--net/xdp/xsk_queue.c5
-rw-r--r--net/xdp/xsk_queue.h13
-rw-r--r--net/xfrm/xfrm_device.c2
-rw-r--r--net/xfrm/xfrm_output.c2
109 files changed, 2184 insertions, 831 deletions
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 502f66349530..a56ed7f2a3a3 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -584,7 +584,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
unsigned long now = jiffies;
/* fastpath: update of existing entry */
- if (unlikely(source != fdb->dst)) {
+ if (unlikely(source != fdb->dst && !fdb->is_sticky)) {
fdb->dst = source;
fdb_modified = true;
/* Take over HW learned entry */
@@ -656,6 +656,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_flags |= NTF_OFFLOADED;
if (fdb->added_by_external_learn)
ndm->ndm_flags |= NTF_EXT_LEARNED;
+ if (fdb->is_sticky)
+ ndm->ndm_flags |= NTF_STICKY;
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
goto nla_put_failure;
@@ -772,8 +774,10 @@ skip:
/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
- const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
+ const u8 *addr, u16 state, u16 flags, u16 vid,
+ u8 ndm_flags)
{
+ u8 is_sticky = !!(ndm_flags & NTF_STICKY);
struct net_bridge_fdb_entry *fdb;
bool modified = false;
@@ -789,6 +793,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
return -EINVAL;
}
+ if (is_sticky && (state & NUD_PERMANENT))
+ return -EINVAL;
+
fdb = br_fdb_find(br, addr, vid);
if (fdb == NULL) {
if (!(flags & NLM_F_CREATE))
@@ -832,6 +839,12 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
modified = true;
}
+
+ if (is_sticky != fdb->is_sticky) {
+ fdb->is_sticky = is_sticky;
+ modified = true;
+ }
+
fdb->added_by_user = 1;
fdb->used = jiffies;
@@ -865,7 +878,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
} else {
spin_lock_bh(&br->hash_lock);
err = fdb_add_entry(br, p, addr, ndm->ndm_state,
- nlh_flags, vid);
+ nlh_flags, vid, ndm->ndm_flags);
spin_unlock_bh(&br->hash_lock);
}
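
[Editor's note: the three br_fdb.c hunks above add the NTF_STICKY flag: a sticky entry keeps its destination port even when the MAC address shows up on another port (the fastpath skips the dst update), and combining it with NUD_PERMANENT is rejected with -EINVAL. Below is a minimal userspace sketch, not part of this series, that installs such an entry over rtnetlink; "fd" is assumed to be a connected AF_NETLINK/NETLINK_ROUTE socket and error handling is elided.]

/* Sketch only: install a sticky bridge FDB entry via RTM_NEWNEIGH.
 * Assumption: <linux/neighbour.h> provides NTF_STICKY (added by the
 * uapi side of this series).
 */
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <string.h>
#include <sys/socket.h>

static int fdb_add_sticky(int fd, int port_ifindex, const unsigned char mac[6])
{
	struct {
		struct nlmsghdr nh;
		struct ndmsg ndm;
		char attrs[64];
	} req;
	struct rtattr *rta;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nh.nlmsg_type = RTM_NEWNEIGH;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	req.ndm.ndm_family = AF_BRIDGE;
	req.ndm.ndm_ifindex = port_ifindex;
	req.ndm.ndm_state = NUD_NOARP;	/* sticky + NUD_PERMANENT is -EINVAL */
	req.ndm.ndm_flags = NTF_MASTER | NTF_STICKY;

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = NDA_LLADDR;
	rta->rta_len = RTA_LENGTH(6);
	memcpy(RTA_DATA(rta), mac, 6);
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + rta->rta_len;

	return send(fd, &req, req.nh.nlmsg_len, 0) < 0 ? -1 : 0;
}

[The matching iproute2 command is roughly "bridge fdb add <mac> dev <port> master static sticky" once the flag is plumbed through there.]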
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0363f1bdc401..3bb66508f07d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -394,8 +394,7 @@ static int find_portno(struct net_bridge *br)
struct net_bridge_port *p;
unsigned long *inuse;
- inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
- GFP_KERNEL);
+ inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
if (!inuse)
return -ENOMEM;
@@ -404,7 +403,7 @@ static int find_portno(struct net_bridge *br)
set_bit(p->port_no, inuse);
}
index = find_first_zero_bit(inuse, BR_MAX_PORTS);
- kfree(inuse);
+ bitmap_free(inuse);
return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}
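
[Editor's note: this hunk and the matching br_vlan.c one below switch the open-coded bitmap allocation to bitmap_zalloc()/bitmap_free(), with callers now passing the number of bits directly. The conversion is behavior-preserving; the lib/bitmap.c helpers are roughly the following (shown for context, not part of this diff), so the kcalloc of BITS_TO_LONGS() longs simply moves behind a clearer name.]

unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
	return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
			     flags);
}

unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
	return bitmap_alloc(nbits, flags | __GFP_ZERO);
}

void bitmap_free(const unsigned long *bitmap)
{
	kfree(bitmap);
}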
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 11ed2029985f..d21035a17f4c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -181,6 +181,7 @@ struct net_bridge_fdb_entry {
struct hlist_node fdb_node;
unsigned char is_local:1,
is_static:1,
+ is_sticky:1,
added_by_user:1,
added_by_external_learn:1,
offloaded:1;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 7df269092103..bb6ba794864f 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -877,8 +877,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
return 0;
}
- changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
- GFP_KERNEL);
+ changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
if (!changed)
return -ENOMEM;
@@ -925,7 +924,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
br->default_pvid = pvid;
out:
- kfree(changed);
+ bitmap_free(changed);
return err;
err_port:
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index b82440e1fcb4..a931a71ef6df 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -264,9 +264,6 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
frontpkt = rearpkt;
rearpkt = NULL;
- err = -ENOMEM;
- if (frontpkt == NULL)
- goto out;
err = -EPROTO;
if (cfpkt_add_head(frontpkt, head, 6) < 0)
goto out;
diff --git a/net/core/dev.c b/net/core/dev.c
index 82114e1111e6..0b2d777e5b9e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1969,6 +1969,9 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
again:
list_for_each_entry_rcu(ptype, ptype_list, list) {
+ if (ptype->ignore_outgoing)
+ continue;
+
/* Never send packets back to the socket
* they originated from - MvS (miquels@drinkel.ow.org)
*/
@@ -3228,7 +3231,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
while (skb) {
struct sk_buff *next = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
rc = xmit_one(skb, dev, txq, next != NULL);
if (unlikely(!dev_xmit_complete(rc))) {
skb->next = next;
@@ -3328,7 +3331,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
for (; skb != NULL; skb = next) {
next = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
/* in case skb wont be segmented, point to itself */
skb->prev = skb;
@@ -5292,8 +5295,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
list_for_each_entry_safe_reverse(skb, p, head, list) {
if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
return;
- list_del(&skb->list);
- skb->next = NULL;
+ skb_list_del_init(skb);
napi_gro_complete(skb);
napi->gro_hash[index].count--;
}
@@ -5478,8 +5480,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
if (pp) {
- list_del(&pp->list);
- pp->next = NULL;
+ skb_list_del_init(pp);
napi_gro_complete(pp);
napi->gro_hash[hash].count--;
}
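
[Editor's note: the dev.c hunks, and several later ones in this series, replace open-coded "skb->next = NULL" and list_del() pairs with the new skb list helpers. For context, the <linux/skbuff.h> helpers look roughly like this:]

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

[skb->next/prev and skb->list share storage in struct sk_buff, so unlinking from a list_head-style GRO list and clearing the legacy next pointer are two views of the same cleanup.]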
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c9993c6c2fd4..9d4e56d97080 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -539,47 +539,17 @@ struct ethtool_link_usettings {
} link_modes;
};
-/* Internal kernel helper to query a device ethtool_link_settings.
- *
- * Backward compatibility note: for compatibility with legacy drivers
- * that implement only the ethtool_cmd API, this has to work with both
- * drivers implementing get_link_ksettings API and drivers
- * implementing get_settings API. When drivers implement get_settings
- * and report ethtool_cmd deprecated fields
- * (transceiver/maxrxpkt/maxtxpkt), these fields are silently ignored
- * because the resulting struct ethtool_link_settings does not report them.
- */
+/* Internal kernel helper to query a device ethtool_link_settings. */
int __ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- int err;
- struct ethtool_cmd cmd;
-
ASSERT_RTNL();
- if (dev->ethtool_ops->get_link_ksettings) {
- memset(link_ksettings, 0, sizeof(*link_ksettings));
- return dev->ethtool_ops->get_link_ksettings(dev,
- link_ksettings);
- }
-
- /* driver doesn't support %ethtool_link_ksettings API. revert to
- * legacy %ethtool_cmd API, unless it's not supported either.
- * TODO: remove when ethtool_ops::get_settings disappears internally
- */
- if (!dev->ethtool_ops->get_settings)
+ if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = ETHTOOL_GSET;
- err = dev->ethtool_ops->get_settings(dev, &cmd);
- if (err < 0)
- return err;
-
- /* we ignore deprecated fields transceiver/maxrxpkt/maxtxpkt
- */
- convert_legacy_settings_to_link_ksettings(link_ksettings, &cmd);
- return err;
+ memset(link_ksettings, 0, sizeof(*link_ksettings));
+ return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
}
EXPORT_SYMBOL(__ethtool_get_link_ksettings);
@@ -635,16 +605,7 @@ store_link_ksettings_for_user(void __user *to,
return 0;
}
-/* Query device for its ethtool_link_settings.
- *
- * Backward compatibility note: this function must fail when driver
- * does not implement ethtool::get_link_ksettings, even if legacy
- * ethtool_ops::get_settings is implemented. This tells new versions
- * of ethtool that they should use the legacy API %ETHTOOL_GSET for
- * this driver, so that they can correctly access the ethtool_cmd
- * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver
- * implements ethtool_ops::get_settings anymore.
- */
+/* Query device for its ethtool_link_settings. */
static int ethtool_get_link_ksettings(struct net_device *dev,
void __user *useraddr)
{
@@ -652,7 +613,6 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings link_ksettings;
ASSERT_RTNL();
-
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
@@ -699,16 +659,7 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
return store_link_ksettings_for_user(useraddr, &link_ksettings);
}
-/* Update device ethtool_link_settings.
- *
- * Backward compatibility note: this function must fail when driver
- * does not implement ethtool::set_link_ksettings, even if legacy
- * ethtool_ops::set_settings is implemented. This tells new versions
- * of ethtool that they should use the legacy API %ETHTOOL_SSET for
- * this driver, so that they can correctly update the ethtool_cmd
- * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver
- * implements ethtool_ops::get_settings anymore.
- */
+/* Update device ethtool_link_settings. */
static int ethtool_set_link_ksettings(struct net_device *dev,
void __user *useraddr)
{
@@ -746,51 +697,31 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
/* Query device for its ethtool_cmd settings.
*
- * Backward compatibility note: for compatibility with legacy ethtool,
- * this has to work with both drivers implementing get_link_ksettings
- * API and drivers implementing get_settings API. When drivers
- * implement get_link_ksettings and report higher link mode bits, a
- * kernel warning is logged once (with name of 1st driver/device) to
- * recommend user to upgrade ethtool, but the command is successful
- * (only the lower link mode bits reported back to user).
+ * Backward compatibility note: for compatibility with legacy ethtool, this is
+ * now implemented via get_link_ksettings. When driver reports higher link mode
+ * bits, a kernel warning is logged once (with name of 1st driver/device) to
+ * recommend user to upgrade ethtool, but the command is successful (only the
+ * lower link mode bits reported back to user). Deprecated fields from
+ * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero.
*/
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
+ struct ethtool_link_ksettings link_ksettings;
struct ethtool_cmd cmd;
+ int err;
ASSERT_RTNL();
+ if (!dev->ethtool_ops->get_link_ksettings)
+ return -EOPNOTSUPP;
- if (dev->ethtool_ops->get_link_ksettings) {
- /* First, use link_ksettings API if it is supported */
- int err;
- struct ethtool_link_ksettings link_ksettings;
-
- memset(&link_ksettings, 0, sizeof(link_ksettings));
- err = dev->ethtool_ops->get_link_ksettings(dev,
- &link_ksettings);
- if (err < 0)
- return err;
- convert_link_ksettings_to_legacy_settings(&cmd,
- &link_ksettings);
-
- /* send a sensible cmd tag back to user */
- cmd.cmd = ETHTOOL_GSET;
- } else {
- /* driver doesn't support %ethtool_link_ksettings
- * API. revert to legacy %ethtool_cmd API, unless it's
- * not supported either.
- */
- int err;
-
- if (!dev->ethtool_ops->get_settings)
- return -EOPNOTSUPP;
+ memset(&link_ksettings, 0, sizeof(link_ksettings));
+ err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
+ if (err < 0)
+ return err;
+ convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = ETHTOOL_GSET;
- err = dev->ethtool_ops->get_settings(dev, &cmd);
- if (err < 0)
- return err;
- }
+ /* send a sensible cmd tag back to user */
+ cmd.cmd = ETHTOOL_GSET;
if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
return -EFAULT;
@@ -800,48 +731,29 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
/* Update device link settings with given ethtool_cmd.
*
- * Backward compatibility note: for compatibility with legacy ethtool,
- * this has to work with both drivers implementing set_link_ksettings
- * API and drivers implementing set_settings API. When drivers
- * implement set_link_ksettings and user's request updates deprecated
- * ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel
- * warning is logged once (with name of 1st driver/device) to
- * recommend user to upgrade ethtool, and the request is rejected.
+ * Backward compatibility note: for compatibility with legacy ethtool, this is
+ * now always implemented via set_link_settings. When user's request updates
+ * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel
+ * warning is logged once (with name of 1st driver/device) to recommend user to
+ * upgrade ethtool, and the request is rejected.
*/
static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
{
+ struct ethtool_link_ksettings link_ksettings;
struct ethtool_cmd cmd;
ASSERT_RTNL();
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
-
- /* first, try new %ethtool_link_ksettings API. */
- if (dev->ethtool_ops->set_link_ksettings) {
- struct ethtool_link_ksettings link_ksettings;
-
- if (!convert_legacy_settings_to_link_ksettings(&link_ksettings,
- &cmd))
- return -EINVAL;
-
- link_ksettings.base.cmd = ETHTOOL_SLINKSETTINGS;
- link_ksettings.base.link_mode_masks_nwords
- = __ETHTOOL_LINK_MODE_MASK_NU32;
- return dev->ethtool_ops->set_link_ksettings(dev,
- &link_ksettings);
- }
-
- /* legacy %ethtool_cmd API */
-
- /* TODO: return -EOPNOTSUPP when ethtool_ops::get_settings
- * disappears internally
- */
-
- if (!dev->ethtool_ops->set_settings)
+ if (!dev->ethtool_ops->set_link_ksettings)
return -EOPNOTSUPP;
- return dev->ethtool_ops->set_settings(dev, &cmd);
+ if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd))
+ return -EINVAL;
+ link_ksettings.base.link_mode_masks_nwords =
+ __ETHTOOL_LINK_MODE_MASK_NU32;
+ return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
}
static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
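
[Editor's note: with the legacy ethtool_cmd fallbacks removed above, get_link_ksettings/set_link_ksettings become the only driver-facing API; even the old ETHTOOL_GSET/SSET ioctls are now serviced by converting to and from link_ksettings. A driver therefore needs something along these lines (hypothetical foo_* names; a minimal fixed-link sketch, not from this series):]

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	cmd->base.speed = SPEED_100;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_DISABLE;
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link_ksettings = foo_get_link_ksettings,
};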
diff --git a/net/core/filter.c b/net/core/filter.c
index 5e00f2b85a56..0b40f95da21b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4013,6 +4013,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
tp->snd_ssthresh = val;
}
break;
+ case TCP_SAVE_SYN:
+ if (val < 0 || val > 1)
+ ret = -EINVAL;
+ else
+ tp->save_syn = val;
+ break;
default:
ret = -EINVAL;
}
@@ -4038,21 +4044,32 @@ static const struct bpf_func_proto bpf_setsockopt_proto = {
BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
+ struct inet_connection_sock *icsk;
struct sock *sk = bpf_sock->sk;
+ struct tcp_sock *tp;
if (!sk_fullsock(sk))
goto err_clear;
-
#ifdef CONFIG_INET
if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
- if (optname == TCP_CONGESTION) {
- struct inet_connection_sock *icsk = inet_csk(sk);
+ switch (optname) {
+ case TCP_CONGESTION:
+ icsk = inet_csk(sk);
if (!icsk->icsk_ca_ops || optlen <= 1)
goto err_clear;
strncpy(optval, icsk->icsk_ca_ops->name, optlen);
optval[optlen - 1] = 0;
- } else {
+ break;
+ case TCP_SAVED_SYN:
+ tp = tcp_sk(sk);
+
+ if (optlen <= 0 || !tp->saved_syn ||
+ optlen > tp->saved_syn[0])
+ goto err_clear;
+ memcpy(optval, tp->saved_syn + 1, optlen);
+ break;
+ default:
goto err_clear;
}
} else if (level == SOL_IP) {
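
[Editor's note: the filter.c hunks expose TCP_SAVE_SYN through bpf_setsockopt() and TCP_SAVED_SYN through bpf_getsockopt(), so a sockops program can ask the stack to retain an incoming SYN's headers and read them back once the connection is established. A sketch, assuming a libbpf-style bpf_helpers.h and a program attached as BPF_CGROUP_SOCK_OPS; per the hunk above the read fails unless 0 < optlen <= the saved length:]

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef TCP_SAVE_SYN
#define TCP_SAVE_SYN	27	/* values from <linux/tcp.h> */
#define TCP_SAVED_SYN	28
#endif
#ifndef SOL_TCP
#define SOL_TCP		6
#endif

SEC("sockops")
int save_and_read_syn(struct bpf_sock_ops *skops)
{
	int one = 1;
	char syn[40] = {};	/* 40 bytes: option-less IPv4 + TCP headers */

	switch (skops->op) {
	case BPF_SOCK_OPS_TCP_LISTEN_CB:
		/* ask TCP to keep the headers of SYNs on this listener */
		bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN, &one,
			       sizeof(one));
		break;
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* copy back a prefix of the saved IP + TCP headers */
		bpf_getsockopt(skops, SOL_TCP, TCP_SAVED_SYN, syn,
			       sizeof(syn));
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";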
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7f6938405fa1..6ac919847ce6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3426,7 +3426,7 @@ xmit_more:
net_info_ratelimited("%s xmit error: %d\n",
pkt_dev->odevname, ret);
pkt_dev->errors++;
- /* fallthru */
+ /* fall through */
case NETDEV_TX_BUSY:
/* Retry it next time */
refcount_dec(&(pkt_dev->skb->users));
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 63ce2283a456..80a7e18c65fb 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1016,7 +1016,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_NEW_NETNSID */
+ nla_total_size(4) /* IFLA_NEW_IFINDEX */
+ nla_total_size(1) /* IFLA_PROTO_DOWN */
- + nla_total_size(4) /* IFLA_IF_NETNSID */
+ + nla_total_size(4) /* IFLA_TARGET_NETNSID */
+ nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
+ nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
+ nla_total_size(4) /* IFLA_MIN_MTU */
@@ -1598,7 +1598,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = change;
- if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_IF_NETNSID, tgt_netnsid))
+ if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
goto nla_put_failure;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
@@ -1737,7 +1737,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_XDP] = { .type = NLA_NESTED },
[IFLA_EVENT] = { .type = NLA_U32 },
[IFLA_GROUP] = { .type = NLA_U32 },
- [IFLA_IF_NETNSID] = { .type = NLA_S32 },
+ [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
[IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
[IFLA_MIN_MTU] = { .type = NLA_U32 },
@@ -1845,7 +1845,15 @@ static bool link_dump_filtered(struct net_device *dev,
return false;
}
-static struct net *get_target_net(struct sock *sk, int netnsid)
+/**
+ * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
+ * @sk: netlink socket
+ * @netnsid: network namespace identifier
+ *
+ * Returns the network namespace identified by netnsid on success or an error
+ * pointer on failure.
+ */
+struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
{
struct net *net;
@@ -1862,6 +1870,7 @@ static struct net *get_target_net(struct sock *sk, int netnsid)
}
return net;
}
+EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -1895,9 +1904,9 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
ifla_policy, NULL) >= 0) {
- if (tb[IFLA_IF_NETNSID]) {
- netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(skb->sk, netnsid);
+ if (tb[IFLA_TARGET_NETNSID]) {
+ netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
+ tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
if (IS_ERR(tgt_net)) {
tgt_net = net;
netnsid = -1;
@@ -1984,7 +1993,7 @@ EXPORT_SYMBOL(rtnl_link_get_net);
*
* 1. IFLA_NET_NS_PID
* 2. IFLA_NET_NS_FD
- * 3. IFLA_IF_NETNSID
+ * 3. IFLA_TARGET_NETNSID
*/
static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
struct nlattr *tb[])
@@ -1994,10 +2003,10 @@ static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
return rtnl_link_get_net(src_net, tb);
- if (!tb[IFLA_IF_NETNSID])
+ if (!tb[IFLA_TARGET_NETNSID])
return get_net(src_net);
- net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID]));
+ net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
if (!net)
return ERR_PTR(-EINVAL);
@@ -2038,13 +2047,13 @@ static int rtnl_ensure_unique_netns(struct nlattr *tb[],
return -EOPNOTSUPP;
}
- if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
+ if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
goto invalid_attr;
- if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD]))
+ if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
goto invalid_attr;
- if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID]))
+ if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
goto invalid_attr;
return 0;
@@ -2320,7 +2329,7 @@ static int do_setlink(const struct sk_buff *skb,
if (err < 0)
return err;
- if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
+ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
tb, CAP_NET_ADMIN);
if (IS_ERR(net)) {
@@ -2763,9 +2772,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
- if (tb[IFLA_IF_NETNSID]) {
- netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
+ if (tb[IFLA_TARGET_NETNSID]) {
+ netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
+ tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}
@@ -3173,9 +3182,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
- if (tb[IFLA_IF_NETNSID]) {
- netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
+ if (tb[IFLA_TARGET_NETNSID]) {
+ netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
+ tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}
@@ -3260,13 +3269,13 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx;
int s_idx = cb->family;
+ int type = cb->nlh->nlmsg_type - RTM_BASE;
if (s_idx == 0)
s_idx = 1;
for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
struct rtnl_link **tab;
- int type = cb->nlh->nlmsg_type-RTM_BASE;
struct rtnl_link *link;
rtnl_dumpit_func dumpit;
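
[Editor's note: the IFLA_IF_NETNSID -> IFLA_TARGET_NETNSID rename runs through the rest of this series (devinet.c and addrconf.c below reuse the attribute for address dumps). It is not a uapi break: the old spelling stays as an alias in the header, along the lines of this sketch of the <uapi/linux/if_link.h> side:]

enum {
	/* ... */
	IFLA_TARGET_NETNSID,
	/* ... */
};

#define IFLA_IF_NETNSID	IFLA_TARGET_NETNSID	/* new alias */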
diff --git a/net/core/sock.c b/net/core/sock.c
index 3730eb855095..8537b6ca72c5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2332,7 +2332,7 @@ static void __release_sock(struct sock *sk)
next = skb->next;
prefetch(next);
WARN_ON_ONCE(skb_dst_is_noref(skb));
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
sk_backlog_rcv(sk, skb);
cond_resched();
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 89b6785cef2a..4b2b194f4f1f 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -94,11 +94,21 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
kfree(xa);
}
-static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
+void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
struct xdp_mem_allocator *xa;
int id = xdp_rxq->mem.id;
+ if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
+ WARN(1, "Missing register, driver bug");
+ return;
+ }
+
+ if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
+ xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) {
+ return;
+ }
+
if (id == 0)
return;
@@ -110,6 +120,7 @@ static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
mutex_unlock(&mem_id_lock);
}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
@@ -119,7 +130,7 @@ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");
- __xdp_rxq_info_unreg_mem_model(xdp_rxq);
+ xdp_rxq_info_unreg_mem_model(xdp_rxq);
xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
xdp_rxq->dev = NULL;
@@ -398,3 +409,41 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
+
+struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
+{
+ unsigned int metasize, totsize;
+ void *addr, *data_to_copy;
+ struct xdp_frame *xdpf;
+ struct page *page;
+
+ /* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
+ metasize = xdp_data_meta_unsupported(xdp) ? 0 :
+ xdp->data - xdp->data_meta;
+ totsize = xdp->data_end - xdp->data + metasize;
+
+ if (sizeof(*xdpf) + totsize > PAGE_SIZE)
+ return NULL;
+
+ page = dev_alloc_page();
+ if (!page)
+ return NULL;
+
+ addr = page_to_virt(page);
+ xdpf = addr;
+ memset(xdpf, 0, sizeof(*xdpf));
+
+ addr += sizeof(*xdpf);
+ data_to_copy = metasize ? xdp->data_meta : xdp->data;
+ memcpy(addr, data_to_copy, totsize);
+
+ xdpf->data = addr + metasize;
+ xdpf->len = totsize - metasize;
+ xdpf->headroom = 0;
+ xdpf->metasize = metasize;
+ xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+
+ xdp_return_buff(xdp);
+ return xdpf;
+}
+EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
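
[Editor's note: xdp_convert_zc_to_xdp_frame() gives XDP_REDIRECT a fallback for zero-copy (AF_XDP) buffers, whose umem-backed pages must not leave the driver: the payload plus metadata is copied into a fresh order-0 page and the original buffer is handed back via xdp_return_buff(). The generic converter in <net/xdp.h> dispatches to it roughly like this (context sketch, not verbatim):]

static inline struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
{
	/* Zero-copy frames cannot be detached from their umem; clone
	 * them into a MEM_TYPE_PAGE_ORDER0 frame instead.
	 */
	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* ... otherwise convert in place, storing the struct xdp_frame
	 * in the buff's own headroom ...
	 */
}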
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 4183e4ba27a5..48c41918fb35 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -38,6 +38,9 @@ config NET_DSA_TAG_DSA
config NET_DSA_TAG_EDSA
bool
+config NET_DSA_TAG_GSWIP
+ bool
+
config NET_DSA_TAG_KSZ
bool
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 9e4d3536f977..6e721f7a2947 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -9,6 +9,7 @@ dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM_PREPEND) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
dsa_core-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 9f3209ff7ffd..a69c1790bbfc 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -52,6 +52,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_EDSA
[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
+#ifdef CONFIG_NET_DSA_TAG_GSWIP
+ [DSA_TAG_PROTO_GSWIP] = &gswip_netdev_ops,
+#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ
[DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
#endif
@@ -70,6 +73,52 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
[DSA_TAG_PROTO_NONE] = &none_ops,
};
+const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
+{
+ const char *protocol_name[DSA_TAG_LAST] = {
+#ifdef CONFIG_NET_DSA_TAG_BRCM
+ [DSA_TAG_PROTO_BRCM] = "brcm",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_BRCM_PREPEND
+ [DSA_TAG_PROTO_BRCM_PREPEND] = "brcm-prepend",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_DSA
+ [DSA_TAG_PROTO_DSA] = "dsa",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+ [DSA_TAG_PROTO_EDSA] = "edsa",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_GSWIP
+ [DSA_TAG_PROTO_GSWIP] = "gswip",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_KSZ
+ [DSA_TAG_PROTO_KSZ] = "ksz",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_LAN9303
+ [DSA_TAG_PROTO_LAN9303] = "lan9303",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_MTK
+ [DSA_TAG_PROTO_MTK] = "mtk",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_QCA
+ [DSA_TAG_PROTO_QCA] = "qca",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ [DSA_TAG_PROTO_TRAILER] = "trailer",
+#endif
+ [DSA_TAG_PROTO_NONE] = "none",
+ };
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(protocol_name) != DSA_TAG_LAST);
+
+ for (i = 0; i < ARRAY_SIZE(dsa_device_ops); i++)
+ if (ops == dsa_device_ops[i])
+ return protocol_name[i];
+
+ return protocol_name[DSA_TAG_PROTO_NONE];
+};
+
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol)
{
const struct dsa_device_ops *ops;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 3964c6f7a7c0..9e4fd04ab53c 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -86,6 +86,7 @@ struct dsa_slave_priv {
/* dsa.c */
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
bool dsa_schedule_work(struct work_struct *work);
+const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
/* legacy.c */
#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
@@ -205,6 +206,9 @@ extern const struct dsa_device_ops dsa_netdev_ops;
/* tag_edsa.c */
extern const struct dsa_device_ops edsa_netdev_ops;
+/* tag_gswip.c */
+extern const struct dsa_device_ops gswip_netdev_ops;
+
/* tag_ksz.c */
extern const struct dsa_device_ops ksz_netdev_ops;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index 42a7b85b84e1..8aa92b09db76 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -392,8 +392,7 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
}
/* Drop our reference to the MDIO bus device */
- if (pd->chip[i].host_dev)
- put_device(pd->chip[i].host_dev);
+ put_device(pd->chip[i].host_dev);
}
kfree(pd->chip);
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 1c45c1d6d241..3f840b6eea69 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1058,6 +1058,27 @@ static struct device_type dsa_type = {
.name = "dsa",
};
+static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return sprintf(buf, "%s\n",
+ dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops));
+}
+static DEVICE_ATTR_RO(tagging);
+
+static struct attribute *dsa_slave_attrs[] = {
+ &dev_attr_tagging.attr,
+ NULL
+};
+
+static const struct attribute_group dsa_group = {
+ .name = "dsa",
+ .attrs = dsa_slave_attrs,
+};
+
static void dsa_slave_phylink_validate(struct net_device *dev,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1353,8 +1374,14 @@ int dsa_slave_create(struct dsa_port *port)
goto out_phy;
}
+ ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group);
+ if (ret)
+ goto out_unreg;
+
return 0;
+out_unreg:
+ unregister_netdev(slave_dev);
out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
@@ -1378,6 +1405,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
rtnl_unlock();
dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
+ sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group);
unregister_netdev(slave_dev);
phylink_destroy(dp->pl);
free_percpu(p->stats64);
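
[Editor's note: with the attribute group above in place, the tagging protocol of a DSA slave becomes visible from userspace at /sys/class/net/<iface>/dsa/tagging; for a switch using the tagger added below it reads back "gswip".]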
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
new file mode 100644
index 000000000000..49e9b73f1be3
--- /dev/null
+++ b/net/dsa/tag_gswip.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel / Lantiq GSWIP V2.0 PMAC tag support
+ *
+ * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+#include "dsa_priv.h"
+
+#define GSWIP_TX_HEADER_LEN 4
+
+/* special tag in TX path header */
+/* Byte 0 */
+#define GSWIP_TX_SLPID_SHIFT 0 /* source port ID */
+#define GSWIP_TX_SLPID_CPU 2
+#define GSWIP_TX_SLPID_APP1 3
+#define GSWIP_TX_SLPID_APP2 4
+#define GSWIP_TX_SLPID_APP3 5
+#define GSWIP_TX_SLPID_APP4 6
+#define GSWIP_TX_SLPID_APP5 7
+
+/* Byte 1 */
+#define GSWIP_TX_CRCGEN_DIS BIT(7)
+#define GSWIP_TX_DPID_SHIFT 0 /* destination group ID */
+#define GSWIP_TX_DPID_ELAN 0
+#define GSWIP_TX_DPID_EWAN 1
+#define GSWIP_TX_DPID_CPU 2
+#define GSWIP_TX_DPID_APP1 3
+#define GSWIP_TX_DPID_APP2 4
+#define GSWIP_TX_DPID_APP3 5
+#define GSWIP_TX_DPID_APP4 6
+#define GSWIP_TX_DPID_APP5 7
+
+/* Byte 2 */
+#define GSWIP_TX_PORT_MAP_EN BIT(7)
+#define GSWIP_TX_PORT_MAP_SEL BIT(6)
+#define GSWIP_TX_LRN_DIS BIT(5)
+#define GSWIP_TX_CLASS_EN BIT(4)
+#define GSWIP_TX_CLASS_SHIFT 0
+#define GSWIP_TX_CLASS_MASK GENMASK(3, 0)
+
+/* Byte 3 */
+#define GSWIP_TX_DPID_EN BIT(0)
+#define GSWIP_TX_PORT_MAP_SHIFT 1
+#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1)
+
+#define GSWIP_RX_HEADER_LEN 8
+
+/* special tag in RX path header */
+/* Byte 7 */
+#define GSWIP_RX_SPPID_SHIFT 4
+#define GSWIP_RX_SPPID_MASK GENMASK(6, 4)
+
+static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int err;
+ u8 *gswip_tag;
+
+ err = skb_cow_head(skb, GSWIP_TX_HEADER_LEN);
+ if (err)
+ return NULL;
+
+ skb_push(skb, GSWIP_TX_HEADER_LEN);
+
+ gswip_tag = skb->data;
+ gswip_tag[0] = GSWIP_TX_SLPID_CPU;
+ gswip_tag[1] = GSWIP_TX_DPID_ELAN;
+ gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL;
+ gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK;
+ gswip_tag[3] |= GSWIP_TX_DPID_EN;
+
+ return skb;
+}
+
+static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+{
+ int port;
+ u8 *gswip_tag;
+
+ if (unlikely(!pskb_may_pull(skb, GSWIP_RX_HEADER_LEN)))
+ return NULL;
+
+ gswip_tag = skb->data - ETH_HLEN;
+
+ /* Get source port information */
+ port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT;
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ /* remove GSWIP tag */
+ skb_pull_rcsum(skb, GSWIP_RX_HEADER_LEN);
+
+ return skb;
+}
+
+const struct dsa_device_ops gswip_netdev_ops = {
+ .xmit = gswip_tag_xmit,
+ .rcv = gswip_tag_rcv,
+};
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index e7857a8ac86d..09ffbf5ce8fa 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -260,7 +260,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
}
sub_frag_mem_limit(fq->q.net, sum_truesize);
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->dev = ldev;
head->tstamp = fq->q.stamp;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 82178cc69c96..777fa3b7fb13 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1512,7 +1512,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
*
* Description:
* Parse the packet's IP header looking for a CIPSO option. Returns a pointer
- * to the start of the CIPSO option on success, NULL if one if not found.
+ * to the start of the CIPSO option on success, NULL if one is not found.
*
*/
unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
@@ -1522,10 +1522,8 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
int optlen;
int taglen;
- for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
+ for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
switch (optptr[0]) {
- case IPOPT_CIPSO:
- return optptr;
case IPOPT_END:
return NULL;
case IPOPT_NOOP:
@@ -1534,6 +1532,11 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
default:
taglen = optptr[1];
}
+ if (!taglen || taglen > optlen)
+ return NULL;
+ if (optptr[0] == IPOPT_CIPSO)
+ return optptr;
+
optlen -= taglen;
optptr += taglen;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ea4bd8a52422..44d931a3cd50 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -100,6 +100,15 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
[IFA_FLAGS] = { .type = NLA_U32 },
[IFA_RT_PRIORITY] = { .type = NLA_U32 },
+ [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
+};
+
+struct inet_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
};
#define IN4_ADDR_HSIZE_SHIFT 8
@@ -1584,13 +1593,14 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
}
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
- u32 portid, u32 seq, int event, unsigned int flags)
+ struct inet_fill_args *args)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
u32 preferred, valid;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
+ args->flags);
if (!nlh)
return -EMSGSIZE;
@@ -1601,6 +1611,10 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ goto nla_put_failure;
+
if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
preferred = ifa->ifa_preferred_lft;
valid = ifa->ifa_valid_lft;
@@ -1647,7 +1661,16 @@ nla_put_failure:
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct inet_fill_args fillargs = {
+ .portid = NETLINK_CB(cb->skb).portid,
+ .seq = cb->nlh->nlmsg_seq,
+ .event = RTM_NEWADDR,
+ .flags = NLM_F_MULTI,
+ .netnsid = -1,
+ };
struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[IFA_MAX+1];
+ struct net *tgt_net = net;
int h, s_h;
int idx, s_idx;
int ip_idx, s_ip_idx;
@@ -1660,12 +1683,24 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
s_idx = idx = cb->args[1];
s_ip_idx = ip_idx = cb->args[2];
+ if (nlmsg_parse(cb->nlh, sizeof(struct ifaddrmsg), tb, IFA_MAX,
+ ifa_ipv4_policy, NULL) >= 0) {
+ if (tb[IFA_TARGET_NETNSID]) {
+ fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
+
+ tgt_net = rtnl_get_net_ns_capable(skb->sk,
+ fillargs.netnsid);
+ if (IS_ERR(tgt_net))
+ return PTR_ERR(tgt_net);
+ }
+ }
+
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
- head = &net->dev_index_head[h];
+ head = &tgt_net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
- net->dev_base_seq;
+ cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
+ tgt_net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
@@ -1679,10 +1714,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
ifa = ifa->ifa_next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
- if (inet_fill_ifaddr(skb, ifa,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWADDR, NLM_F_MULTI) < 0) {
+ if (inet_fill_ifaddr(skb, ifa, &fillargs) < 0) {
rcu_read_unlock();
goto done;
}
@@ -1698,6 +1730,8 @@ done:
cb->args[0] = h;
cb->args[1] = idx;
cb->args[2] = ip_idx;
+ if (fillargs.netnsid >= 0)
+ put_net(tgt_net);
return skb->len;
}
@@ -1705,8 +1739,14 @@ done:
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
u32 portid)
{
+ struct inet_fill_args fillargs = {
+ .portid = portid,
+ .seq = nlh ? nlh->nlmsg_seq : 0,
+ .event = event,
+ .flags = 0,
+ .netnsid = -1,
+ };
struct sk_buff *skb;
- u32 seq = nlh ? nlh->nlmsg_seq : 0;
int err = -ENOBUFS;
struct net *net;
@@ -1715,7 +1755,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
if (!skb)
goto errout;
- err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
+ err = inet_fill_ifaddr(skb, ifa, &fillargs);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f3c89ccf14c5..bee8db979195 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -797,8 +797,10 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
return -EINVAL;
}
dev = __dev_get_by_index(net, nh->nh_oif);
- if (!dev)
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
return -ENODEV;
+ }
if (!(dev->flags & IFF_UP)) {
NL_SET_ERR_MSG(extack,
"Nexthop device is not up");
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index b798862b6be5..7efe740c06eb 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -86,13 +86,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
options = (__be32 *)(greh + 1);
if (greh->flags & GRE_CSUM) {
- if (skb_checksum_simple_validate(skb)) {
+ if (!skb_checksum_simple_validate(skb)) {
+ skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+ null_compute_pseudo);
+ } else if (csum_err) {
*csum_err = true;
return -EINVAL;
}
- skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
- null_compute_pseudo);
options++;
}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e7227128df2c..da930b01a147 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -382,7 +382,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
*/
if (end < qp->q.len ||
((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
- goto err;
+ goto discard_qp;
qp->q.flags |= INET_FRAG_LAST_IN;
qp->q.len = end;
} else {
@@ -394,20 +394,20 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
if (end > qp->q.len) {
/* Some bits beyond end -> corruption. */
if (qp->q.flags & INET_FRAG_LAST_IN)
- goto err;
+ goto discard_qp;
qp->q.len = end;
}
}
if (end == offset)
- goto err;
+ goto discard_qp;
err = -ENOMEM;
if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
- goto err;
+ goto discard_qp;
err = pskb_trim_rcsum(skb, end - offset);
if (err)
- goto err;
+ goto discard_qp;
/* Note : skb->rbnode and skb->dev share the same location. */
dev = skb->dev;
@@ -423,6 +423,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
* We do the same here for IPv4 (and increment an snmp counter).
*/
+ err = -EINVAL;
/* Find out where to put this fragment. */
prev_tail = qp->q.fragments_tail;
if (!prev_tail)
@@ -431,7 +432,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
/* This is the common case: skb goes to the end. */
/* Detect and discard overlaps. */
if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
- goto discard_qp;
+ goto overlap;
if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
ip4_frag_append_to_last_run(&qp->q, skb);
else
@@ -450,7 +451,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
FRAG_CB(skb1)->frag_run_len)
rbn = &parent->rb_right;
else /* Found an overlap with skb1. */
- goto discard_qp;
+ goto overlap;
} while (*rbn);
/* Here we have parent properly set, and rbn pointing to
* one of its NULL left/right children. Insert skb.
@@ -487,16 +488,18 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
skb->_skb_refdst = 0UL;
err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
+ if (err)
+ inet_frag_kill(&qp->q);
return err;
}
skb_dst_drop(skb);
return -EINPROGRESS;
+overlap:
+ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
inet_frag_kill(&qp->q);
- err = -EINVAL;
- __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
err:
kfree_skb(skb);
return err;
@@ -621,7 +624,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
sub_frag_mem_limit(qp->q.net, head->truesize);
*nextp = NULL;
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->prev = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8cce0e9ea08c..c3385a84f8ff 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -232,13 +232,10 @@ static void gre_err(struct sk_buff *skb, u32 info)
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
- bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
- iph->ihl * 4) < 0) {
- if (!csum_err) /* ignore csum errors. */
- return;
- }
+ if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
+ iph->ihl * 4) < 0)
+ return;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3196cf58f418..35a786c0aaa0 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -531,11 +531,7 @@ static void ip_sublist_rcv_finish(struct list_head *head)
struct sk_buff *skb, *next;
list_for_each_entry_safe(skb, next, head, list) {
- list_del(&skb->list);
- /* Handle ip{6}_forward case, as sch_direct_xmit have
- * another kind of SKB-list usage (see validate_xmit_skb_list)
- */
- skb->next = NULL;
+ skb_list_del_init(skb);
dst_input(skb);
}
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9c4e72e9c60a..c09219e7f230 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -278,7 +278,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
struct sk_buff *nskb = segs->next;
int err;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
if (err && ret == 0)
@@ -684,7 +684,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb = frag;
frag = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
if (err == 0) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 10c6246396cc..67670fac7c8d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2595,6 +2595,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->compressed_ack = 0;
tp->bytes_sent = 0;
tp->bytes_retrans = 0;
+ tp->duplicate_sack[0].start_seq = 0;
+ tp->duplicate_sack[0].end_seq = 0;
tp->dsack_dups = 0;
tp->reord_seen = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4cf2f7bb2802..d9034073138c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4199,6 +4199,17 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}
+static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
+{
+ /* When the ACK path fails or drops most ACKs, the sender would
+ * timeout and spuriously retransmit the same segment repeatedly.
+ * The receiver remembers and reflects via DSACKs. Leverage the
+ * DSACK state and change the txhash to re-route speculatively.
+ */
+ if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq)
+ sk_rethink_txhash(sk);
+}
+
static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4211,6 +4222,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+ tcp_rcv_spurious_retrans(sk, skb);
if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
end_seq = tp->rcv_nxt;
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
@@ -4755,6 +4767,7 @@ queue_and_out:
}
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+ tcp_rcv_spurious_retrans(sk, skb);
/* A retransmit, 2nd most common case. Force an immediate ack. */
NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
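
[Editor's note: tcp_rcv_spurious_retrans() leans on sk_rethink_txhash(), which re-randomizes the socket's transmit flow hash so ECMP/LAG load balancing is likely to pick a different path for subsequent packets. For context, the <net/sock.h> helpers are roughly the following (not part of this diff):]

static inline u32 net_tx_rndhash(void)
{
	u32 v = prandom_u32();

	return v ?: 1;
}

static inline void sk_set_txhash(struct sock *sk)
{
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}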
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 44c09eddbb78..09547ef9c4c6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2549,7 +2549,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_tw_reuse = 2;
cnt = tcp_hashinfo.ehash_mask + 1;
- net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
+ net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d51a8c0b3372..d4733160e6b7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4491,6 +4491,7 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
[IFA_FLAGS] = { .len = sizeof(u32) },
[IFA_RT_PRIORITY] = { .len = sizeof(u32) },
+ [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
};
static int
@@ -4793,19 +4794,32 @@ static inline int inet6_ifaddr_msgsize(void)
+ nla_total_size(4) /* IFA_RT_PRIORITY */;
}
+struct inet6_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
+};
+
static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
- u32 portid, u32 seq, int event, unsigned int flags)
+ struct inet6_fill_args *args)
{
struct nlmsghdr *nlh;
u32 preferred, valid;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
+ sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ goto error;
+
if (!((ifa->flags&IFA_F_PERMANENT) &&
(ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
@@ -4855,7 +4869,7 @@ error:
}
static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
- u32 portid, u32 seq, int event, u16 flags)
+ struct inet6_fill_args *args)
{
struct nlmsghdr *nlh;
u8 scope = RT_SCOPE_UNIVERSE;
@@ -4864,10 +4878,15 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
+ sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ return -EMSGSIZE;
+
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
@@ -4881,7 +4900,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
}
static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
- u32 portid, u32 seq, int event, unsigned int flags)
+ struct inet6_fill_args *args)
{
struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
int ifindex = dev ? dev->ifindex : 1;
@@ -4891,10 +4910,15 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
+ sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ return -EMSGSIZE;
+
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
@@ -4916,8 +4940,14 @@ enum addr_type_t {
/* called with rcu_read_lock() */
static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
struct netlink_callback *cb, enum addr_type_t type,
- int s_ip_idx, int *p_ip_idx)
+ int s_ip_idx, int *p_ip_idx, int netnsid)
{
+ struct inet6_fill_args fillargs = {
+ .portid = NETLINK_CB(cb->skb).portid,
+ .seq = cb->nlh->nlmsg_seq,
+ .flags = NLM_F_MULTI,
+ .netnsid = netnsid,
+ };
struct ifmcaddr6 *ifmca;
struct ifacaddr6 *ifaca;
int err = 1;
@@ -4927,16 +4957,13 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
switch (type) {
case UNICAST_ADDR: {
struct inet6_ifaddr *ifa;
+ fillargs.event = RTM_NEWADDR;
/* unicast address incl. temp addr */
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (++ip_idx < s_ip_idx)
continue;
- err = inet6_fill_ifaddr(skb, ifa,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWADDR,
- NLM_F_MULTI);
+ err = inet6_fill_ifaddr(skb, ifa, &fillargs);
if (err < 0)
break;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -4944,31 +4971,26 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
break;
}
case MULTICAST_ADDR:
+ fillargs.event = RTM_GETMULTICAST;
+
/* multicast address */
for (ifmca = idev->mc_list; ifmca;
ifmca = ifmca->next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
- err = inet6_fill_ifmcaddr(skb, ifmca,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_GETMULTICAST,
- NLM_F_MULTI);
+ err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs);
if (err < 0)
break;
}
break;
case ANYCAST_ADDR:
+ fillargs.event = RTM_GETANYCAST;
/* anycast address */
for (ifaca = idev->ac_list; ifaca;
ifaca = ifaca->aca_next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
- err = inet6_fill_ifacaddr(skb, ifaca,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_GETANYCAST,
- NLM_F_MULTI);
+ err = inet6_fill_ifacaddr(skb, ifaca, &fillargs);
if (err < 0)
break;
}
@@ -4985,6 +5007,9 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
enum addr_type_t type)
{
struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[IFA_MAX+1];
+ struct net *tgt_net = net;
+ int netnsid = -1;
int h, s_h;
int idx, ip_idx;
int s_idx, s_ip_idx;
@@ -4996,11 +5021,22 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
s_idx = idx = cb->args[1];
s_ip_idx = ip_idx = cb->args[2];
+ if (nlmsg_parse(cb->nlh, sizeof(struct ifaddrmsg), tb, IFA_MAX,
+ ifa_ipv6_policy, NULL) >= 0) {
+ if (tb[IFA_TARGET_NETNSID]) {
+ netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
+
+ tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
+ if (IS_ERR(tgt_net))
+ return PTR_ERR(tgt_net);
+ }
+ }
+
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
+ cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
- head = &net->dev_index_head[h];
+ head = &tgt_net->dev_index_head[h];
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
@@ -5012,7 +5048,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
goto cont;
if (in6_dump_addrs(idev, skb, cb, type,
- s_ip_idx, &ip_idx) < 0)
+ s_ip_idx, &ip_idx, netnsid) < 0)
goto done;
cont:
idx++;
@@ -5023,6 +5059,8 @@ done:
cb->args[0] = h;
cb->args[1] = idx;
cb->args[2] = ip_idx;
+ if (netnsid >= 0)
+ put_net(tgt_net);
return skb->len;
}
@@ -5053,6 +5091,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
+ struct inet6_fill_args fillargs = {
+ .portid = NETLINK_CB(in_skb).portid,
+ .seq = nlh->nlmsg_seq,
+ .event = RTM_NEWADDR,
+ .flags = 0,
+ .netnsid = -1,
+ };
+ struct net *tgt_net = net;
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX+1];
struct in6_addr *addr = NULL, *peer;
@@ -5066,15 +5112,24 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
+ if (tb[IFA_TARGET_NETNSID]) {
+ fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
+
+ tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
+ fillargs.netnsid);
+ if (IS_ERR(tgt_net))
+ return PTR_ERR(tgt_net);
+ }
+
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
if (!addr)
return -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifa_index)
- dev = dev_get_by_index(net, ifm->ifa_index);
+ dev = dev_get_by_index(tgt_net, ifm->ifa_index);
- ifa = ipv6_get_ifaddr(net, addr, dev, 1);
+ ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
if (!ifa) {
err = -EADDRNOTAVAIL;
goto errout;
@@ -5086,20 +5141,22 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout_ifa;
}
- err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, RTM_NEWADDR, 0);
+ err = inet6_fill_ifaddr(skb, ifa, &fillargs);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout_ifa;
}
- err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
+ err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
errout_ifa:
in6_ifa_put(ifa);
errout:
if (dev)
dev_put(dev);
+ if (fillargs.netnsid >= 0)
+ put_net(tgt_net);
+
return err;
}
@@ -5107,13 +5164,20 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
{
struct sk_buff *skb;
struct net *net = dev_net(ifa->idev->dev);
+ struct inet6_fill_args fillargs = {
+ .portid = 0,
+ .seq = 0,
+ .event = event,
+ .flags = 0,
+ .netnsid = -1,
+ };
int err = -ENOBUFS;
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
if (!skb)
goto errout;
- err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
+ err = inet6_fill_ifaddr(skb, ifa, &fillargs);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
WARN_ON(err == -EMSGSIZE);
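The fill helpers now take one argument block instead of four scalars. Reconstructed from the initializers in this patch (a sketch, not quoted from the source), the struct presumably looks like:

struct inet6_fill_args {
	u32 portid;
	u32 seq;
	int event;
	unsigned int flags;	/* NLM_F_MULTI for dumps, 0 for unicast */
	int netnsid;		/* -1 when no IFA_TARGET_NETNSID was given */
};

Bundling the parameters keeps every caller's signature stable when a new dump attribute such as the target netnsid is threaded through.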
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 9a4261e50272..77ef8478234f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -209,6 +209,7 @@ lookup_protocol:
np->hop_limit = -1;
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
+ np->mc_all = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->repflow = net->ipv6.sysctl.flowlabel_reflect;
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e493b041d4ac..515adbdba1d2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -427,35 +427,17 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct net *net = dev_net(skb->dev);
- const struct gre_base_hdr *greh;
const struct ipv6hdr *ipv6h;
- int grehlen = sizeof(*greh);
+ struct tnl_ptk_info tpi;
struct ip6_tnl *t;
- int key_off = 0;
- __be16 flags;
- __be32 key;
- if (!pskb_may_pull(skb, offset + grehlen))
- return;
- greh = (const struct gre_base_hdr *)(skb->data + offset);
- flags = greh->flags;
- if (flags & (GRE_VERSION | GRE_ROUTING))
+ if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
+ offset) < 0)
return;
- if (flags & GRE_CSUM)
- grehlen += 4;
- if (flags & GRE_KEY) {
- key_off = grehlen + offset;
- grehlen += 4;
- }
- if (!pskb_may_pull(skb, offset + grehlen))
- return;
ipv6h = (const struct ipv6hdr *)skb->data;
- greh = (const struct gre_base_hdr *)(skb->data + offset);
- key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
-
t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
- key, greh->protocol);
+ tpi.key, tpi.proto);
if (!t)
return;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f9f8f554d141..89e0d5118afe 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -725,7 +725,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb = frag;
frag = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
kfree(tmp_hdr);
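These call sites rely on a small skbuff helper introduced alongside this series; judging by the one-for-one replacements, it is presumably just (sketch, not quoted from skbuff.h):

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

Naming the operation keeps list-unlink sites greppable, which matters once skb lists stop being bare ->next chains.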
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c0cac9cc3a28..381ce38940ae 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -674,6 +674,13 @@ done:
retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
break;
}
+ case IPV6_MULTICAST_ALL:
+ if (optlen < sizeof(int))
+ goto e_inval;
+ np->mc_all = valbool;
+ retv = 0;
+ break;
+
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
@@ -1266,6 +1273,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->mcast_oif;
break;
+ case IPV6_MULTICAST_ALL:
+ val = np->mc_all;
+ break;
+
case IPV6_UNICAST_IF:
val = (__force int)htonl((__u32) np->ucast_oif);
break;
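A hedged user-space sketch of the new socket option (the numeric fallback below is assumed from this series' uapi change, not confirmed here):

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPV6_MULTICAST_ALL
#define IPV6_MULTICAST_ALL 29	/* assumed uapi value from this series */
#endif

/* Deliver only groups this socket joined itself, mirroring the
 * IPv4 IP_MULTICAST_ALL semantics. */
static int v6_disable_multicast_all(int fd)
{
	int off = 0;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_ALL,
			  &off, sizeof(off));
}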
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 4ae54aaca373..6895e1dc0b03 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -636,7 +636,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
}
if (!mc) {
rcu_read_unlock();
- return true;
+ return np->mc_all;
}
read_lock(&mc->sflock);
psl = mc->sflist;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 8f68a518d9db..b8ac369f98ad 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -450,7 +450,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
sub_frag_mem_limit(fq->q.net, head->truesize);
head->ignore_df = 1;
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->dev = dev;
head->tstamp = fq->q.stamp;
ipv6_hdr(head)->payload_len = htons(payload_len);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5c5b4f79296e..f1b1ff30fe5b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -388,7 +388,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
}
sub_frag_mem_limit(fq->q.net, sum_truesize);
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->dev = dev;
head->tstamp = fq->q.stamp;
ipv6_hdr(head)->payload_len = htons(payload_len);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 480a79f47c52..21a919e74fbf 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -993,7 +993,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
#ifdef CONFIG_IPV6_SUBTREES
rt->rt6i_src = ort->fib6_src;
#endif
- rt->rt6i_prefsrc = ort->fib6_prefsrc;
}
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
@@ -1447,11 +1446,6 @@ static int rt6_insert_exception(struct rt6_info *nrt,
if (ort->fib6_src.plen)
src_key = &nrt->rt6i_src.addr;
#endif
-
- /* Update rt6i_prefsrc as it could be changed
- * in rt6_remove_prefsrc()
- */
- nrt->rt6i_prefsrc = ort->fib6_prefsrc;
/* rt6_mtu_change() might lower mtu on ort.
* Only insert this exception route if its mtu
* is less than ort's mtu value.
@@ -1633,25 +1627,6 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
rcu_read_unlock();
}
-static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt)
-{
- struct rt6_exception_bucket *bucket;
- struct rt6_exception *rt6_ex;
- int i;
-
- bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
- lockdep_is_held(&rt6_exception_lock));
-
- if (bucket) {
- for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
- hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
- rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
- }
- bucket++;
- }
- }
-}
-
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
struct rt6_info *rt, int mtu)
{
@@ -3135,8 +3110,6 @@ install_route:
rt->fib6_nh.nh_dev = dev;
rt->fib6_table = table;
- cfg->fc_nlinfo.nl_net = dev_net(dev);
-
if (idev)
in6_dev_put(idev);
@@ -3793,8 +3766,6 @@ static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
spin_lock_bh(&rt6_exception_lock);
/* remove prefsrc entry */
rt->fib6_prefsrc.plen = 0;
- /* need to update cache as well */
- rt6_exceptions_remove_prefsrc(rt);
spin_unlock_bh(&rt6_exception_lock);
}
return 0;
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 260b3dc1b4a2..64d4bef04e73 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -127,9 +127,7 @@ void llc_sap_close(struct llc_sap *sap)
list_del_rcu(&sap->node);
spin_unlock_bh(&llc_sap_list_lock);
- synchronize_rcu();
-
- kfree(sap);
+ kfree_rcu(sap, rcu);
}
static struct packet_type llc_packet_type __read_mostly = {
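The conversion assumes struct llc_sap carries a struct rcu_head member named rcu; the general pattern, shown on a hypothetical type:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct sap_like {
	int number;
	struct rcu_head rcu;	/* anchor that kfree_rcu() needs */
};

static void sap_like_release(struct sap_like *s)
{
	/* Frees after a grace period without making the caller block,
	 * unlike the synchronize_rcu() + kfree() pair it replaces. */
	kfree_rcu(s, rcu);
}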
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d25da0e66da1..504627e2117f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -158,12 +158,10 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (ret)
return ret;
- if (type == NL80211_IFTYPE_AP_VLAN &&
- params && params->use_4addr == 0) {
+ if (type == NL80211_IFTYPE_AP_VLAN && params->use_4addr == 0) {
RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
ieee80211_check_fast_rx_iface(sdata);
- } else if (type == NL80211_IFTYPE_STATION &&
- params && params->use_4addr >= 0) {
+ } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
sdata->u.mgd.use_4addr = params->use_4addr;
}
@@ -911,6 +909,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
+ if (params->he_cap)
+ sdata->vif.bss_conf.he_support = true;
+
mutex_lock(&local->mtx);
err = ieee80211_vif_use_channel(sdata, &params->chandef,
IEEE80211_CHANCTX_SHARED);
@@ -1092,50 +1093,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
return 0;
}
-/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
-struct iapp_layer2_update {
- u8 da[ETH_ALEN]; /* broadcast */
- u8 sa[ETH_ALEN]; /* STA addr */
- __be16 len; /* 6 */
- u8 dsap; /* 0 */
- u8 ssap; /* 0 */
- u8 control;
- u8 xid_info[3];
-} __packed;
-
-static void ieee80211_send_layer2_update(struct sta_info *sta)
-{
- struct iapp_layer2_update *msg;
- struct sk_buff *skb;
-
- /* Send Level 2 Update Frame to update forwarding tables in layer 2
- * bridge devices */
-
- skb = dev_alloc_skb(sizeof(*msg));
- if (!skb)
- return;
- msg = skb_put(skb, sizeof(*msg));
-
- /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
- * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
-
- eth_broadcast_addr(msg->da);
- memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
- msg->len = htons(6);
- msg->dsap = 0;
- msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
- msg->control = 0xaf; /* XID response lsb.1111F101.
- * F=0 (no poll command; unsolicited frame) */
- msg->xid_info[0] = 0x81; /* XID format identifier */
- msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
- msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
-
- skb->dev = sta->sdata->dev;
- skb->protocol = eth_type_trans(skb, sta->sdata->dev);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx_ni(skb);
-}
-
static int sta_apply_auth_flags(struct ieee80211_local *local,
struct sta_info *sta,
u32 mask, u32 set)
@@ -1499,7 +1456,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
}
if (layer2_update)
- ieee80211_send_layer2_update(sta);
+ cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
rcu_read_unlock();
@@ -1601,7 +1558,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_inc_num_mcast(sta->sdata);
- ieee80211_send_layer2_update(sta);
+ cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
}
err = sta_apply_parameters(local, sta, params);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b5adf3625d16..3fe541e358f3 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -3,6 +3,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* GPLv2
*
@@ -214,6 +215,9 @@ static const char *hw_flag_names[] = {
FLAG(SUPPORTS_TDLS_BUFFER_STA),
FLAG(DEAUTH_NEED_MGD_TX_PREP),
FLAG(DOESNT_SUPPORT_QOS_NDP),
+ FLAG(BUFF_MMPDU_TXQ),
+ FLAG(SUPPORTS_VHT_EXT_NSS_BW),
+ FLAG(STA_MMPDU_TXQ),
#undef FLAG
};
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 4105081dc1df..af5185a836e5 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -4,6 +4,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -140,7 +141,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->local;
- size_t bufsz = AQM_TXQ_ENTRY_LEN*(IEEE80211_NUM_TIDS+1);
+ size_t bufsz = AQM_TXQ_ENTRY_LEN * (IEEE80211_NUM_TIDS + 2);
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
struct txq_info *txqi;
ssize_t rv;
@@ -162,7 +163,9 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
bufsz+buf-p,
"tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n");
- for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ if (!sta->sta.txq[i])
+ continue;
txqi = to_txq_info(sta->sta.txq[i]);
p += scnprintf(p, bufsz+buf-p,
"%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
@@ -487,12 +490,368 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf)+buf-p,
"MCS TX highest: %d Mbps\n",
le16_to_cpu(vhtc->vht_mcs.tx_highest));
+#undef PFLAG
}
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(vht_capa);
+static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf, *p;
+ size_t buf_sz = PAGE_SIZE;
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_sta_he_cap *hec = &sta->sta.he_cap;
+ struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp;
+ u8 ppe_size;
+ u8 *cap;
+ int i;
+ ssize_t ret;
+
+ buf = kmalloc(buf_sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ p += scnprintf(p, buf_sz + buf - p, "HE %ssupported\n",
+ hec->has_he ? "" : "not ");
+ if (!hec->has_he)
+ goto out;
+
+ cap = hec->he_cap_elem.mac_cap_info;
+ p += scnprintf(p, buf_sz + buf - p,
+ "MAC-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
+ cap[0], cap[1], cap[2], cap[3], cap[4], cap[5]);
+
+#define PRINT(fmt, ...) \
+ p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \
+ ##__VA_ARGS__)
+
+#define PFLAG(t, n, a, b) \
+ do { \
+ if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a) \
+ PRINT("%s", b); \
+ } while (0)
+
+#define PFLAG_RANGE(t, i, n, s, m, off, fmt) \
+ do { \
+ u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK; \
+ u8 idx = ((cap[i] & msk) >> (ffs(msk) - 1)) + off; \
+ PRINT(fmt, (s << idx) + (m * idx)); \
+ } while (0)
+
+#define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b) \
+ do { \
+ if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) { \
+ PRINT("%s", b); \
+ break; \
+ } \
+ PFLAG_RANGE(t, i, n, s, m, off, fmt); \
+ } while (0)
+
+ PFLAG(MAC, 0, HTC_HE, "HTC-HE");
+ PFLAG(MAC, 0, TWT_REQ, "TWT-REQ");
+ PFLAG(MAC, 0, TWT_RES, "TWT-RES");
+ PFLAG_RANGE_DEFAULT(MAC, 0, DYNAMIC_FRAG, 0, 1, 0,
+ "DYNAMIC-FRAG-LEVEL-%d", NOT_SUPP, "NOT-SUPP");
+ PFLAG_RANGE_DEFAULT(MAC, 0, MAX_NUM_FRAG_MSDU, 1, 0, 0,
+ "MAX-NUM-FRAG-MSDU-%d", UNLIMITED, "UNLIMITED");
+
+ PFLAG_RANGE_DEFAULT(MAC, 1, MIN_FRAG_SIZE, 128, 0, -1,
+ "MIN-FRAG-SIZE-%d", UNLIMITED, "UNLIMITED");
+ PFLAG_RANGE_DEFAULT(MAC, 1, TF_MAC_PAD_DUR, 0, 8, 0,
+ "TF-MAC-PAD-DUR-%dUS", MASK, "UNKNOWN");
+ PFLAG_RANGE(MAC, 1, MULTI_TID_AGG_RX_QOS, 0, 1, 1,
+ "MULTI-TID-AGG-RX-QOS-%d");
+
+ if (cap[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) {
+ switch (((cap[2] << 1) | (cap[1] >> 7)) & 0x3) {
+ case 0:
+ PRINT("LINK-ADAPTATION-NO-FEEDBACK");
+ break;
+ case 1:
+ PRINT("LINK-ADAPTATION-RESERVED");
+ break;
+ case 2:
+ PRINT("LINK-ADAPTATION-UNSOLICITED-FEEDBACK");
+ break;
+ case 3:
+ PRINT("LINK-ADAPTATION-BOTH");
+ break;
+ }
+ }
+
+ PFLAG(MAC, 2, ALL_ACK, "ALL-ACK");
+ PFLAG(MAC, 2, TRS, "TRS");
+ PFLAG(MAC, 2, BSR, "BSR");
+ PFLAG(MAC, 2, BCAST_TWT, "BCAST-TWT");
+ PFLAG(MAC, 2, 32BIT_BA_BITMAP, "32BIT-BA-BITMAP");
+ PFLAG(MAC, 2, MU_CASCADING, "MU-CASCADING");
+ PFLAG(MAC, 2, ACK_EN, "ACK-EN");
+
+ PFLAG(MAC, 3, OMI_CONTROL, "OMI-CONTROL");
+ PFLAG(MAC, 3, OFDMA_RA, "OFDMA-RA");
+
+ switch (cap[3] & IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) {
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT:
+ PRINT("MAX-AMPDU-LEN-EXP-USE-VHT");
+ break;
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1:
+ PRINT("MAX-AMPDU-LEN-EXP-VHT-1");
+ break;
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2:
+ PRINT("MAX-AMPDU-LEN-EXP-VHT-2");
+ break;
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED:
+ PRINT("MAX-AMPDU-LEN-EXP-RESERVED");
+ break;
+ }
+
+ PFLAG(MAC, 3, AMSDU_FRAG, "AMSDU-FRAG");
+ PFLAG(MAC, 3, FLEX_TWT_SCHED, "FLEX-TWT-SCHED");
+ PFLAG(MAC, 3, RX_CTRL_FRAME_TO_MULTIBSS, "RX-CTRL-FRAME-TO-MULTIBSS");
+
+ PFLAG(MAC, 4, BSRP_BQRP_A_MPDU_AGG, "BSRP-BQRP-A-MPDU-AGG");
+ PFLAG(MAC, 4, QTP, "QTP");
+ PFLAG(MAC, 4, BQR, "BQR");
+ PFLAG(MAC, 4, SRP_RESP, "SRP-RESP");
+ PFLAG(MAC, 4, NDP_FB_REP, "NDP-FB-REP");
+ PFLAG(MAC, 4, OPS, "OPS");
+ PFLAG(MAC, 4, AMDSU_IN_AMPDU, "AMSDU-IN-AMPDU");
+
+ PRINT("MULTI-TID-AGG-TX-QOS-%d", ((cap[5] << 1) | (cap[4] >> 7)) & 0x7);
+
+ PFLAG(MAC, 5, SUBCHAN_SELECVITE_TRANSMISSION,
+ "SUBCHAN-SELECVITE-TRANSMISSION");
+ PFLAG(MAC, 5, UL_2x996_TONE_RU, "UL-2x996-TONE-RU");
+ PFLAG(MAC, 5, OM_CTRL_UL_MU_DATA_DIS_RX, "OM-CTRL-UL-MU-DATA-DIS-RX");
+
+ cap = hec->he_cap_elem.phy_cap_info;
+ p += scnprintf(p, buf_sz + buf - p,
+ "PHY CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
+ cap[0], cap[1], cap[2], cap[3], cap[4], cap[5], cap[6],
+ cap[7], cap[8], cap[9], cap[10]);
+
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_IN_2G,
+ "CHANNEL-WIDTH-SET-40MHZ-IN-2G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
+ "CHANNEL-WIDTH-SET-40MHZ-80MHZ-IN-5G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ "CHANNEL-WIDTH-SET-160MHZ-IN-5G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ "CHANNEL-WIDTH-SET-80PLUS80-MHZ-IN-5G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G,
+ "CHANNEL-WIDTH-SET-RU-MAPPING-IN-2G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G,
+ "CHANNEL-WIDTH-SET-RU-MAPPING-IN-5G");
+
+ switch (cap[1] & IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK) {
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ:
+ PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-20MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ:
+ PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-40MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ:
+ PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-20MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ:
+ PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-40MHZ");
+ break;
+ }
+
+ PFLAG(PHY, 1, DEVICE_CLASS_A, "DEVICE-CLASS-A");
+ PFLAG(PHY, 1, LDPC_CODING_IN_PAYLOAD,
+ "LDPC-CODING-IN-PAYLOAD");
+ PFLAG(PHY, 1, HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US,
+ "HE-LTF-AND-GI-FOR-HE-PPDUS-0-8US");
+ PRINT("MIDAMBLE-RX-MAX-NSTS-%d", ((cap[2] << 1) | (cap[1] >> 7)) & 0x3);
+
+ PFLAG(PHY, 2, NDP_4x_LTF_AND_3_2US, "NDP-4X-LTF-AND-3-2US");
+ PFLAG(PHY, 2, STBC_TX_UNDER_80MHZ, "STBC-TX-UNDER-80MHZ");
+ PFLAG(PHY, 2, STBC_RX_UNDER_80MHZ, "STBC-RX-UNDER-80MHZ");
+ PFLAG(PHY, 2, DOPPLER_TX, "DOPPLER-TX");
+ PFLAG(PHY, 2, DOPPLER_RX, "DOPPLER-RX");
+ PFLAG(PHY, 2, UL_MU_FULL_MU_MIMO, "UL-MU-FULL-MU-MIMO");
+ PFLAG(PHY, 2, UL_MU_PARTIAL_MU_MIMO, "UL-MU-PARTIAL-MU-MIMO");
+
+ switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK) {
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM:
+ PRINT("DCM-MAX-CONST-TX-NO-DCM");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK:
+ PRINT("DCM-MAX-CONST-TX-BPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK:
+ PRINT("DCM-MAX-CONST-TX-QPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM:
+ PRINT("DCM-MAX-CONST-TX-16-QAM");
+ break;
+ }
+
+ PFLAG(PHY, 3, DCM_MAX_TX_NSS_1, "DCM-MAX-TX-NSS-1");
+ PFLAG(PHY, 3, DCM_MAX_TX_NSS_2, "DCM-MAX-TX-NSS-2");
+
+ switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK) {
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM:
+ PRINT("DCM-MAX-CONST-RX-NO-DCM");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK:
+ PRINT("DCM-MAX-CONST-RX-BPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK:
+ PRINT("DCM-MAX-CONST-RX-QPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM:
+ PRINT("DCM-MAX-CONST-RX-16-QAM");
+ break;
+ }
+
+ PFLAG(PHY, 3, DCM_MAX_RX_NSS_1, "DCM-MAX-RX-NSS-1");
+ PFLAG(PHY, 3, DCM_MAX_RX_NSS_2, "DCM-MAX-RX-NSS-2");
+ PFLAG(PHY, 3, RX_HE_MU_PPDU_FROM_NON_AP_STA,
+ "RX-HE-MU-PPDU-FROM-NON-AP-STA");
+ PFLAG(PHY, 3, SU_BEAMFORMER, "SU-BEAMFORMER");
+
+ PFLAG(PHY, 4, SU_BEAMFORMEE, "SU-BEAMFORMEE");
+ PFLAG(PHY, 4, MU_BEAMFORMER, "MU-BEAMFORMER");
+
+ PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_UNDER_80MHZ, 0, 1, 4,
+ "BEAMFORMEE-MAX-STS-UNDER-%d");
+ PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_ABOVE_80MHZ, 0, 1, 4,
+ "BEAMFORMEE-MAX-STS-ABOVE-%d");
+
+ PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ, 0, 1, 1,
+ "NUM-SND-DIM-UNDER-80MHZ-%d");
+ PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ, 0, 1, 1,
+ "NUM-SND-DIM-ABOVE-80MHZ-%d");
+ PFLAG(PHY, 5, NG16_SU_FEEDBACK, "NG16-SU-FEEDBACK");
+ PFLAG(PHY, 5, NG16_MU_FEEDBACK, "NG16-MU-FEEDBACK");
+
+ PFLAG(PHY, 6, CODEBOOK_SIZE_42_SU, "CODEBOOK-SIZE-42-SU");
+ PFLAG(PHY, 6, CODEBOOK_SIZE_75_MU, "CODEBOOK-SIZE-75-MU");
+ PFLAG(PHY, 6, TRIG_SU_BEAMFORMER_FB, "TRIG-SU-BEAMFORMER-FB");
+ PFLAG(PHY, 6, TRIG_MU_BEAMFORMER_FB, "TRIG-MU-BEAMFORMER-FB");
+ PFLAG(PHY, 6, TRIG_CQI_FB, "TRIG-CQI-FB");
+ PFLAG(PHY, 6, PARTIAL_BW_EXT_RANGE, "PARTIAL-BW-EXT-RANGE");
+ PFLAG(PHY, 6, PARTIAL_BANDWIDTH_DL_MUMIMO,
+ "PARTIAL-BANDWIDTH-DL-MUMIMO");
+ PFLAG(PHY, 6, PPE_THRESHOLD_PRESENT, "PPE-THRESHOLD-PRESENT");
+
+ PFLAG(PHY, 7, SRP_BASED_SR, "SRP-BASED-SR");
+ PFLAG(PHY, 7, POWER_BOOST_FACTOR_AR, "POWER-BOOST-FACTOR-AR");
+ PFLAG(PHY, 7, HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
+ "HE-SU-MU-PPDU-4XLTF-AND-08-US-GI");
+ PFLAG_RANGE(PHY, 7, MAX_NC, 0, 1, 1, "MAX-NC-%d");
+ PFLAG(PHY, 7, STBC_TX_ABOVE_80MHZ, "STBC-TX-ABOVE-80MHZ");
+ PFLAG(PHY, 7, STBC_RX_ABOVE_80MHZ, "STBC-RX-ABOVE-80MHZ");
+
+ PFLAG(PHY, 8, HE_ER_SU_PPDU_4XLTF_AND_08_US_GI,
+ "HE-ER-SU-PPDU-4XLTF-AND-08-US-GI");
+ PFLAG(PHY, 8, 20MHZ_IN_40MHZ_HE_PPDU_IN_2G,
+ "20MHZ-IN-40MHZ-HE-PPDU-IN-2G");
+ PFLAG(PHY, 8, 20MHZ_IN_160MHZ_HE_PPDU, "20MHZ-IN-160MHZ-HE-PPDU");
+ PFLAG(PHY, 8, 80MHZ_IN_160MHZ_HE_PPDU, "80MHZ-IN-160MHZ-HE-PPDU");
+ PFLAG(PHY, 8, HE_ER_SU_1XLTF_AND_08_US_GI,
+ "HE-ER-SU-1XLTF-AND-08-US-GI");
+ PFLAG(PHY, 8, MIDAMBLE_RX_TX_2X_AND_1XLTF,
+ "MIDAMBLE-RX-TX-2X-AND-1XLTF");
+
+ switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK) {
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ:
+ PRINT("DDCM-MAX-BW-20MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ:
+ PRINT("DCM-MAX-BW-40MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ:
+ PRINT("DCM-MAX-BW-80MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ:
+ PRINT("DCM-MAX-BW-160-OR-80P80-MHZ");
+ break;
+ }
+
+ PFLAG(PHY, 9, LONGER_THAN_16_SIGB_OFDM_SYM,
+ "LONGER-THAN-16-SIGB-OFDM-SYM");
+ PFLAG(PHY, 9, NON_TRIGGERED_CQI_FEEDBACK,
+ "NON-TRIGGERED-CQI-FEEDBACK");
+ PFLAG(PHY, 9, TX_1024_QAM_LESS_THAN_242_TONE_RU,
+ "TX-1024-QAM-LESS-THAN-242-TONE-RU");
+ PFLAG(PHY, 9, RX_1024_QAM_LESS_THAN_242_TONE_RU,
+ "RX-1024-QAM-LESS-THAN-242-TONE-RU");
+ PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB,
+ "RX-FULL-BW-SU-USING-MU-WITH-COMP-SIGB");
+ PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
+ "RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB");
+
+#undef PFLAG_RANGE_DEFAULT
+#undef PFLAG_RANGE
+#undef PFLAG
+
+#define PRINT_NSS_SUPP(f, n) \
+ do { \
+ int i; \
+ u16 v = le16_to_cpu(nss->f); \
+ p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \
+ for (i = 0; i < 8; i += 2) { \
+ switch ((v >> i) & 0x3) { \
+ case 0: \
+ PRINT(n "-%d-SUPPORT-0-7", i / 2); \
+ break; \
+ case 1: \
+ PRINT(n "-%d-SUPPORT-0-9", i / 2); \
+ break; \
+ case 2: \
+ PRINT(n "-%d-SUPPORT-0-11", i / 2); \
+ break; \
+ case 3: \
+ PRINT(n "-%d-NOT-SUPPORTED", i / 2); \
+ break; \
+ } \
+ } \
+ } while (0)
+
+ PRINT_NSS_SUPP(rx_mcs_80, "RX-MCS-80");
+ PRINT_NSS_SUPP(tx_mcs_80, "TX-MCS-80");
+
+ if (cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
+ PRINT_NSS_SUPP(rx_mcs_160, "RX-MCS-160");
+ PRINT_NSS_SUPP(tx_mcs_160, "TX-MCS-160");
+ }
+
+ if (cap[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ PRINT_NSS_SUPP(rx_mcs_80p80, "RX-MCS-80P80");
+ PRINT_NSS_SUPP(tx_mcs_80p80, "TX-MCS-80P80");
+ }
+
+#undef PRINT_NSS_SUPP
+#undef PRINT
+
+ if (!(cap[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT))
+ goto out;
+
+ p += scnprintf(p, buf_sz + buf - p, "PPE-THRESHOLDS: %#.2x",
+ hec->ppe_thres[0]);
+
+ ppe_size = ieee80211_he_ppe_size(hec->ppe_thres[0], cap);
+ for (i = 1; i < ppe_size; i++) {
+ p += scnprintf(p, buf_sz + buf - p, " %#.2x",
+ hec->ppe_thres[i]);
+ }
+ p += scnprintf(p, buf_sz + buf - p, "\n");
+
+out:
+ ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ kfree(buf);
+ return ret;
+}
+STA_OPS(he_capa);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
@@ -538,6 +897,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(agg_status);
DEBUGFS_ADD(ht_capa);
DEBUGFS_ADD(vht_capa);
+ DEBUGFS_ADD(he_capa);
DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
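For reference, the PFLAG_RANGE arithmetic above can be checked in isolation; a small stand-alone demo (userspace, with an assumed 2-bit mask of 0x03 as the MIN_FRAG_SIZE call implies):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Mirror of PFLAG_RANGE: extract the masked field, shift it down by
 * ffs(mask)-1, add the offset, then report (s << idx) + (m * idx). */
static unsigned int decode_range(unsigned char cap, unsigned char msk,
				 int s, int m, int off)
{
	unsigned int idx = ((cap & msk) >> (ffs(msk) - 1)) + off;

	return (s << idx) + (m * idx);
}

int main(void)
{
	/* MIN_FRAG_SIZE with raw field value 3: 128 << (3 - 1) = 512;
	 * a raw value of 0 is short-circuited to "UNLIMITED" by the
	 * _DEFAULT variant before this arithmetic runs. */
	printf("MIN-FRAG-SIZE-%u\n", decode_range(0x03, 0x03, 128, 0, -1));
	return 0;
}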
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 8f6998091d26..e42c641b6190 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1173,6 +1173,16 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
local->ops->wake_tx_queue(&local->hw, &txq->txq);
}
+static inline bool drv_can_aggregate_in_amsdu(struct ieee80211_local *local,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ if (!local->ops->can_aggregate_in_amsdu)
+ return true;
+
+ return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb);
+}
+
static inline int drv_start_nan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_nan_conf *conf)
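A hypothetical driver-side sketch of the new hook (the foo_* names are illustrative; a real driver's policy will differ):

#include <net/mac80211.h>

static bool foo_can_aggregate_in_amsdu(struct ieee80211_hw *hw,
				       struct sk_buff *head,
				       struct sk_buff *skb)
{
	/* e.g. hardware that refuses mixed-length A-MSDU subframes */
	return head->len == skb->len;
}

static const struct ieee80211_ops foo_ops = {
	/* mandatory callbacks (tx, start, stop, ...) elided */
	.can_aggregate_in_amsdu = foo_can_aggregate_in_amsdu,
};

Leaving the op NULL keeps today's behaviour, since the inline wrapper defaults to true.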
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f0f5fedb8caa..0d704e8d7078 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1070,7 +1070,9 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_vht_cap cap_ie;
struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
- ieee80211_chandef_vht_oper(elems->vht_operation,
+ ieee80211_chandef_vht_oper(&local->hw,
+ elems->vht_operation,
+ elems->ht_operation,
&chandef);
memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 172aeae21ae9..f40a2167935f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -818,6 +818,7 @@ enum txq_info_flags {
IEEE80211_TXQ_STOP,
IEEE80211_TXQ_AMPDU,
IEEE80211_TXQ_NO_AMSDU,
+ IEEE80211_TXQ_STOP_NETIF_TX,
};
/**
@@ -1198,6 +1199,9 @@ struct ieee80211_local {
/* number of RX chains the hardware has */
u8 rx_chains;
+ /* bitmap of which sbands were copied */
+ u8 sband_allocated;
+
int tx_headroom; /* required headroom for hardware/radiotap */
/* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -1226,6 +1230,7 @@ struct ieee80211_local {
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet;
+ struct tasklet_struct wake_txqs_tasklet;
atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
@@ -2038,6 +2043,7 @@ void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats,
struct txq_info *txqi);
+void ieee80211_wake_txqs(unsigned long data);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
const u8 *extra, size_t extra_len, const u8 *bssid,
@@ -2106,7 +2112,9 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
struct cfg80211_chan_def *chandef);
-bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
+ const struct ieee80211_vht_operation *oper,
+ const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef);
u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index c054ac85793c..4700718e010f 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -248,6 +248,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
(key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(sdata);
+ key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
sta ? &sta->sta : NULL, &key->conf);
@@ -256,8 +257,65 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
"failed to remove key (%d, %pM) from hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
+}
- key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
+static int ieee80211_hw_key_replace(struct ieee80211_key *old_key,
+ struct ieee80211_key *new_key,
+ bool ptk0rekey)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local;
+ struct sta_info *sta;
+ int ret;
+
+ /* Aggregation sessions are OK when running on SW crypto.
+ * A broken remote STA may cause issues not observed with HW
+ * crypto, though.
+ */
+ if (!(old_key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+ return 0;
+
+ assert_key_lock(old_key->local);
+ sta = old_key->sta;
+
+ /* PTK only using key ID 0 needs special handling on rekey */
+ if (new_key && sta && ptk0rekey) {
+ local = old_key->local;
+ sdata = old_key->sdata;
+
+ /* Stop TX till we are on the new key */
+ old_key->flags |= KEY_FLAG_TAINTED;
+ ieee80211_clear_fast_xmit(sta);
+
+ /* Aggregation sessions during rekey are complicated due to the
+ * reorder buffer and retransmits. Sidestep that by blocking
+ * aggregation during rekey and tearing down running sessions.
+ */
+ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ ieee80211_sta_tear_down_BA_sessions(sta,
+ AGG_STOP_LOCAL_REQUEST);
+ }
+
+ if (!wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) {
+ pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.",
+ sta->sta.addr);
+ /* Flushing the driver queues *may* help prevent
+ * the clear text leaks and freezes.
+ */
+ ieee80211_flush_queues(local, sdata, false);
+ }
+ }
+
+ ieee80211_key_disable_hw_accel(old_key);
+
+ if (new_key)
+ ret = ieee80211_key_enable_hw_accel(new_key);
+ else
+ ret = 0;
+
+ return ret;
}
static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
@@ -316,38 +374,57 @@ void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
}
-static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
bool pairwise,
struct ieee80211_key *old,
struct ieee80211_key *new)
{
int idx;
+ int ret;
bool defunikey, defmultikey, defmgmtkey;
/* caller must provide at least one old/new */
if (WARN_ON(!new && !old))
- return;
+ return 0;
if (new)
list_add_tail_rcu(&new->list, &sdata->key_list);
WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx);
- if (old)
+ if (old) {
idx = old->conf.keyidx;
- else
+ /* TODO: properly implement and test "Extended Key ID for
+ * Individually Addressed Frames" from IEEE 802.11-2016.
+ * Until then, always assume only key ID 0 is used for
+ * pairwise keys. */
+ ret = ieee80211_hw_key_replace(old, new, pairwise);
+ } else {
+ /* new must be provided in case old is not */
idx = new->conf.keyidx;
+ if (!new->local->wowlan)
+ ret = ieee80211_key_enable_hw_accel(new);
+ else
+ ret = 0;
+ }
+
+ if (ret)
+ return ret;
if (sta) {
if (pairwise) {
rcu_assign_pointer(sta->ptk[idx], new);
sta->ptk_idx = idx;
- ieee80211_check_fast_xmit(sta);
+ if (new) {
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ ieee80211_check_fast_xmit(sta);
+ }
} else {
rcu_assign_pointer(sta->gtk[idx], new);
}
- ieee80211_check_fast_rx(sta);
+ if (new)
+ ieee80211_check_fast_rx(sta);
} else {
defunikey = old &&
old == key_mtx_dereference(sdata->local,
@@ -380,6 +457,8 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
if (old)
list_del_rcu(&old->list);
+
+ return 0;
}
struct ieee80211_key *
@@ -575,9 +654,6 @@ static void ieee80211_key_free_common(struct ieee80211_key *key)
static void __ieee80211_key_destroy(struct ieee80211_key *key,
bool delay_tailroom)
{
- if (key->local)
- ieee80211_key_disable_hw_accel(key);
-
if (key->local) {
struct ieee80211_sub_if_data *sdata = key->sdata;
@@ -654,7 +730,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
- struct ieee80211_local *local = sdata->local;
struct ieee80211_key *old_key;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
@@ -691,17 +766,13 @@ int ieee80211_key_link(struct ieee80211_key *key,
increment_tailroom_need_count(sdata);
- ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
- ieee80211_key_destroy(old_key, delay_tailroom);
-
- ieee80211_debugfs_key_add(key);
+ ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
- if (!local->wowlan) {
- ret = ieee80211_key_enable_hw_accel(key);
- if (ret)
- ieee80211_key_free(key, delay_tailroom);
+ if (!ret) {
+ ieee80211_debugfs_key_add(key);
+ ieee80211_key_destroy(old_key, delay_tailroom);
} else {
- ret = 0;
+ ieee80211_key_free(key, delay_tailroom);
}
out:
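Drivers that can install a new key-ID-0 PTK atomically sidestep the warning and queue flush above by advertising the extended feature; a hypothetical probe-time fragment:

#include <net/mac80211.h>

static void foo_declare_ptk0_rekey(struct ieee80211_hw *hw)
{
	/* Tells mac80211 (and, via nl80211, userspace) that PTK0
	 * rekeys are safe on this device. */
	wiphy_ext_feature_set(hw->wiphy,
			      NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
}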
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 513627896204..77381017bac7 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -4,6 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -610,6 +611,18 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->ops = ops;
local->use_chanctx = use_chanctx;
+ /*
+ * We need a bit of data queued to build aggregates properly, so
+ * instruct the TCP stack to allow more than a single ms of data
+ * to be queued in the stack. The value is a bit-shift of 1
+ * second, so 8 is ~4ms of queued data. Only affects local TCP
+ * sockets.
+ * This is the default, anyhow - drivers may need to override it
+ * for local reasons (longer buffers, longer completion time, or
+ * similar).
+ */
+ local->hw.tx_sk_pacing_shift = 8;
+
/* set up some defaults */
local->hw.queues = 1;
local->hw.max_rates = 1;
@@ -684,6 +697,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
(unsigned long)local);
+ if (ops->wake_tx_queue)
+ tasklet_init(&local->wake_txqs_tasklet, ieee80211_wake_txqs,
+ (unsigned long)local);
+
tasklet_init(&local->tasklet,
ieee80211_tasklet_handler,
(unsigned long) local);
@@ -1154,6 +1171,51 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
goto fail_rate;
}
+ if (local->rate_ctrl) {
+ clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags);
+ if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ }
+
+ /*
+ * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE,
+ * or have it when we don't, copy the sband structure and set/clear it.
+ * This is necessary because rate scaling algorithms could be switched
+ * and have different support values.
+ * Log the copy so driver authors can align their advertised
+ * capabilities and avoid the reallocation in the common case.
+ */
+ BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated));
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+ bool local_cap, ie_cap;
+
+ local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ sband = local->hw.wiphy->bands[band];
+ if (!sband || !sband->vht_cap.vht_supported)
+ continue;
+
+ ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest &
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE));
+
+ if (local_cap == ie_cap)
+ continue;
+
+ sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL);
+ if (!sband)
+ goto fail_rate;
+
+ wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n",
+ band);
+
+ sband->vht_cap.vht_mcs.tx_highest ^=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
+ local->hw.wiphy->bands[band] = sband;
+ local->sband_allocated |= BIT(band);
+ }
+
/* add one default STA interface if supported */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
!ieee80211_hw_check(hw, NO_AUTO_VIF)) {
@@ -1272,6 +1334,7 @@ static int ieee80211_free_ack_frame(int id, void *p, void *data)
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
+ enum nl80211_band band;
mutex_destroy(&local->iflist_mtx);
mutex_destroy(&local->mtx);
@@ -1287,6 +1350,12 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
ieee80211_free_led_names(local);
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!(local->sband_allocated & BIT(band)))
+ continue;
+ kfree(local->hw.wiphy->bands[band]);
+ }
+
wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d51da26e9c18..8bad414c52ad 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
+ * Copyright (C) 2018 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*
@@ -98,7 +99,9 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
- ieee80211_chandef_vht_oper(ie->vht_operation, &sta_chan_def);
+ ieee80211_chandef_vht_oper(&sdata->local->hw,
+ ie->vht_operation, ie->ht_operation,
+ &sta_chan_def);
if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
&sta_chan_def))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 3dbecae4be73..89dac799a85f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -220,7 +220,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
memcpy(&he_oper_vht_cap, he_oper->optional, 3);
he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0);
- if (!ieee80211_chandef_vht_oper(&he_oper_vht_cap,
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
+ &he_oper_vht_cap, ht_oper,
&vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
sdata_info(sdata,
@@ -228,7 +229,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ret = IEEE80211_STA_DISABLE_HE;
goto out;
}
- } else if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
+ } else if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_oper,
+ ht_oper, &vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disable VHT\n");
@@ -3237,19 +3239,16 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
if (bss_conf->he_support) {
- u32 he_oper_params =
- le32_to_cpu(elems.he_operation->he_oper_params);
+ bss_conf->bss_color =
+ le32_get_bits(elems.he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
- bss_conf->bss_color = he_oper_params &
- IEEE80211_HE_OPERATION_BSS_COLOR_MASK;
bss_conf->htc_trig_based_pkt_ext =
- (he_oper_params &
- IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK) <<
- IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET;
+ le32_get_bits(elems.he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK);
bss_conf->frame_time_rts_th =
- (he_oper_params &
- IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK) <<
- IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET;
+ le32_get_bits(elems.he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
bss_conf->multi_sta_back_32bit =
sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 96611d5dfadb..a0ca27aeb732 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -115,7 +115,8 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_FAILED_PLCP_CRC |
- RX_FLAG_ONLY_MONITOR))
+ RX_FLAG_ONLY_MONITOR |
+ RX_FLAG_NO_PSDU))
return true;
if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
@@ -189,6 +190,15 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
}
+ if (status->flag & RX_FLAG_NO_PSDU)
+ len += 1;
+
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
+ len = ALIGN(len, 2);
+ len += 4;
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
+ }
+
if (status->chains) {
/* antenna and antenna signal fields */
len += 2 * hweight8(status->chains);
@@ -279,6 +289,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct ieee80211_vendor_radiotap rtap = {};
struct ieee80211_radiotap_he he = {};
struct ieee80211_radiotap_he_mu he_mu = {};
+ struct ieee80211_radiotap_lsig lsig = {};
if (status->flag & RX_FLAG_RADIOTAP_HE) {
he = *(struct ieee80211_radiotap_he *)skb->data;
@@ -291,6 +302,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
skb_pull(skb, sizeof(he_mu));
}
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
+ lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
+ skb_pull(skb, sizeof(lsig));
+ }
+
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
/* rtap.len and rtap.pad are undone immediately */
@@ -549,7 +565,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
if (status->encoding == RX_ENC_HE &&
status->flag & RX_FLAG_RADIOTAP_HE) {
-#define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val))
+#define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
he.data6 |= HE_PREP(DATA6_NSTS,
@@ -630,6 +646,21 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += sizeof(he_mu);
}
+ if (status->flag & RX_FLAG_NO_PSDU) {
+ rthdr->it_present |=
+ cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
+ *pos++ = status->zero_length_psdu_type;
+ }
+
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
+ /* ensure 2 byte alignment */
+ while ((pos - (u8 *)rthdr) & 1)
+ pos++;
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
+ memcpy(pos, &lsig, sizeof(lsig));
+ pos += sizeof(lsig);
+ }
+
for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
*pos++ = status->chain_signal[chain];
*pos++ = chain;
@@ -1505,7 +1536,7 @@ static void sta_ps_start(struct sta_info *sta)
if (!sta->sta.txq[0])
return;
- for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
+ for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
if (txq_has_queue(sta->sta.txq[tid]))
set_bit(tid, &sta->txq_buffered_tids);
else
@@ -2046,6 +2077,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
idx = sdata->fragment_next;
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
struct ieee80211_hdr *f_hdr;
+ struct sk_buff *f_skb;
idx--;
if (idx < 0)
@@ -2057,7 +2089,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
entry->last_frag + 1 != frag)
continue;
- f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
+ f_skb = __skb_peek(&entry->skb_list);
+ f_hdr = (struct ieee80211_hdr *) f_skb->data;
/*
* Check ftype and addresses are equal, else check next fragment
@@ -2314,7 +2347,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
if (!sdata->u.mgd.use_4addr)
return -1;
- else
+ else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
check_port_control = true;
}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 029334835747..4e4902bdbef8 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -144,6 +144,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
wide_bw_chansw_ie->new_center_freq_seg1,
/* .basic_mcs_set doesn't matter */
};
+ struct ieee80211_ht_operation ht_oper = {};
/* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
* to the previously parsed chandef
@@ -151,7 +152,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
new_vht_chandef = csa_ie->chandef;
/* ignore if parsing fails */
- if (!ieee80211_chandef_vht_oper(&vht_oper, &new_vht_chandef))
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
+ &vht_oper, &ht_oper,
+ &new_vht_chandef))
new_vht_chandef.chan = NULL;
if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ &&
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f34202242d24..fb8c2252ac0e 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -113,7 +113,12 @@ static void __cleanup_single_sta(struct sta_info *sta)
if (sta->sta.txq[0]) {
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
+ struct txq_info *txqi;
+
+ if (!sta->sta.txq[i])
+ continue;
+
+ txqi = to_txq_info(sta->sta.txq[i]);
spin_lock_bh(&fq->lock);
ieee80211_txq_purge(local, txqi);
@@ -374,6 +379,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
struct txq_info *txq = txq_data + i * size;
+ /* might not do anything for the bufferable MMPDU TXQ */
ieee80211_txq_init(sdata, sta, txq, i);
}
}
@@ -1239,13 +1245,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
- if (sta->sta.txq[0]) {
- for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- if (!txq_has_queue(sta->sta.txq[i]))
- continue;
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
+ continue;
- drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
- }
+ drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
}
skb_queue_head_init(&pending);
@@ -1683,7 +1687,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
return;
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
- if (!(driver_release_tids & BIT(tid)) ||
+ if (!sta->sta.txq[tid] ||
+ !(driver_release_tids & BIT(tid)) ||
txq_has_queue(sta->sta.txq[tid]))
continue;
@@ -2323,13 +2328,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
}
- if (ieee80211_hw_check(&sta->local->hw, REPORTS_TX_ACK_STATUS) &&
- !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
+ sta->status_stats.ack_signal_filled) {
sinfo->avg_ack_signal =
-(s8)ewma_avg_signal_read(
&sta->status_stats.avg_ack_signal);
sinfo->filled |=
- BIT_ULL(NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG);
+ BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f353d9db54bc..c42bfa1dcd2c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1249,10 +1249,18 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
return NULL;
- if (!ieee80211_is_data_present(hdr->frame_control))
- return NULL;
-
- if (sta) {
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
+ if ((!ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
+ vif->type == NL80211_IFTYPE_STATION) &&
+ sta && sta->uploaded) {
+ /*
+ * This will be NULL if the driver didn't set the
+ * opt-in hardware flag.
+ */
+ txq = sta->sta.txq[IEEE80211_NUM_TIDS];
+ }
+ } else if (sta) {
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
if (!sta->uploaded)
@@ -1440,16 +1448,33 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
txqi->txq.vif = &sdata->vif;
- if (sta) {
- txqi->txq.sta = &sta->sta;
- sta->sta.txq[tid] = &txqi->txq;
- txqi->txq.tid = tid;
- txqi->txq.ac = ieee80211_ac_from_tid(tid);
- } else {
+ if (!sta) {
sdata->vif.txq = &txqi->txq;
txqi->txq.tid = 0;
txqi->txq.ac = IEEE80211_AC_BE;
+
+ return;
+ }
+
+ if (tid == IEEE80211_NUM_TIDS) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ /* Drivers need to opt in to the management MPDU TXQ */
+ if (!ieee80211_hw_check(&sdata->local->hw,
+ STA_MMPDU_TXQ))
+ return;
+ } else if (!ieee80211_hw_check(&sdata->local->hw,
+ BUFF_MMPDU_TXQ)) {
+ /* Drivers need to opt in to the bufferable MMPDU TXQ */
+ return;
+ }
+ txqi->txq.ac = IEEE80211_AC_VO;
+ } else {
+ txqi->txq.ac = ieee80211_ac_from_tid(tid);
}
+
+ txqi->txq.sta = &sta->sta;
+ txqi->txq.tid = tid;
+ sta->sta.txq[tid] = &txqi->txq;
}
void ieee80211_txq_purge(struct ieee80211_local *local,
@@ -2951,6 +2976,10 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
goto out;
+ /* Key is being removed */
+ if (build.key->flags & KEY_FLAG_TAINTED)
+ goto out;
+
switch (build.key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -3196,6 +3225,10 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
max_amsdu_len = min_t(int, max_amsdu_len,
sta->sta.max_rc_amsdu_len);
+ if (sta->sta.max_tid_amsdu_len[tid])
+ max_amsdu_len = min_t(int, max_amsdu_len,
+ sta->sta.max_tid_amsdu_len[tid]);
+
spin_lock_bh(&fq->lock);
/* TODO: Ideally aggregation should be done on dequeue to remain
@@ -3228,6 +3261,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (max_frags && nfrags > max_frags)
goto out;
+ if (!drv_can_aggregate_in_amsdu(local, head, skb))
+ goto out;
+
if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
@@ -3472,13 +3508,19 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct ieee80211_tx_data tx;
ieee80211_tx_result r;
- struct ieee80211_vif *vif;
+ struct ieee80211_vif *vif = txq->vif;
spin_lock_bh(&fq->lock);
- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
+ if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
+ test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
goto out;
+ if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
+ set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
+ goto out;
+ }
+
/* Make sure fragments stay together. */
skb = __skb_dequeue(&txqi->frags);
if (skb)
@@ -3573,6 +3615,7 @@ begin:
}
IEEE80211_SKB_CB(skb)->control.vif = vif;
+
out:
spin_unlock_bh(&fq->lock);
@@ -3601,13 +3644,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!IS_ERR_OR_NULL(sta)) {
struct ieee80211_fast_tx *fast_tx;
- /* We need a bit of data queued to build aggregates properly, so
- * instruct the TCP stack to allow more than a single ms of data
- * to be queued in the stack. The value is a bit-shift of 1
- * second, so 8 is ~4ms of queued data. Only affects local TCP
- * sockets.
- */
- sk_pacing_shift_update(skb->sk, 8);
+ sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
fast_tx = rcu_dereference(sta->fast_tx);
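With the shift now hardware-configurable, a driver with deeper buffers or longer completion times could relax it before ieee80211_register_hw(); a hypothetical fragment (1 s >> 7 is roughly 8 ms of enqueued TCP data per socket, against ~4 ms for the default of 8):

#include <net/mac80211.h>

static void foo_tune_tcp_pacing(struct ieee80211_hw *hw)
{
	hw->tx_sk_pacing_shift = 7;
}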
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 716cd6442d86..36a3c2ada515 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -240,6 +240,99 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_ctstoself_duration);
+static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_vif *vif = &sdata->vif;
+ struct fq *fq = &local->fq;
+ struct ps_data *ps = NULL;
+ struct txq_info *txqi;
+ struct sta_info *sta;
+ int i;
+
+ spin_lock_bh(&fq->lock);
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ ps = &sdata->bss->ps;
+
+ sdata->vif.txqs_stopped[ac] = false;
+
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata)
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ struct ieee80211_txq *txq = sta->sta.txq[i];
+
+ txqi = to_txq_info(txq);
+
+ if (ac != txq->ac)
+ continue;
+
+ if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
+ &txqi->flags))
+ continue;
+
+ spin_unlock_bh(&fq->lock);
+ drv_wake_tx_queue(local, txqi);
+ spin_lock_bh(&fq->lock);
+ }
+ }
+
+ if (!vif->txq)
+ goto out;
+
+ txqi = to_txq_info(vif->txq);
+
+ if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
+ (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
+ goto out;
+
+ spin_unlock_bh(&fq->lock);
+
+ drv_wake_tx_queue(local, txqi);
+ return;
+out:
+ spin_unlock_bh(&fq->lock);
+}
+
+void ieee80211_wake_txqs(unsigned long data)
+{
+ struct ieee80211_local *local = (struct ieee80211_local *)data;
+ struct ieee80211_sub_if_data *sdata;
+ int n_acs = IEEE80211_NUM_ACS;
+ unsigned long flags;
+ int i;
+
+ rcu_read_lock();
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
+
+ for (i = 0; i < local->hw.queues; i++) {
+ if (local->queue_stop_reasons[i])
+ continue;
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ int ac;
+
+ for (ac = 0; ac < n_acs; ac++) {
+ int ac_queue = sdata->vif.hw_queue[ac];
+
+ if (ac_queue == i ||
+ sdata->vif.cab_queue == i)
+ __ieee80211_wake_txqs(sdata, ac);
+ }
+ }
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ rcu_read_unlock();
+}
+
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
{
struct ieee80211_sub_if_data *sdata;
@@ -308,6 +401,9 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
rcu_read_unlock();
} else
tasklet_schedule(&local->tx_pending_tasklet);
+
+ if (local->ops->wake_tx_queue)
+ tasklet_schedule(&local->wake_txqs_tasklet);
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -351,9 +447,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
return;
- if (local->ops->wake_tx_queue)
- return;
-
if (local->hw.queues < IEEE80211_NUM_ACS)
n_acs = 1;
@@ -366,8 +459,15 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
for (ac = 0; ac < n_acs; ac++) {
if (sdata->vif.hw_queue[ac] == queue ||
- sdata->vif.cab_queue == queue)
- netif_stop_subqueue(sdata->dev, ac);
+ sdata->vif.cab_queue == queue) {
+ if (!local->ops->wake_tx_queue) {
+ netif_stop_subqueue(sdata->dev, ac);
+ continue;
+ }
+ spin_lock(&local->fq.lock);
+ sdata->vif.txqs_stopped[ac] = true;
+ spin_unlock(&local->fq.lock);
+ }
}
}
rcu_read_unlock();
@@ -2657,49 +2757,65 @@ bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
return true;
}
-bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
+ const struct ieee80211_vht_operation *oper,
+ const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef)
{
struct cfg80211_chan_def new = *chandef;
- int cf1, cf2;
+ int cf0, cf1;
+ int ccfs0, ccfs1, ccfs2;
+ int ccf0, ccf1;
- if (!oper)
+ if (!oper || !htop)
return false;
- cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg0_idx,
- chandef->chan->band);
- cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
- chandef->chan->band);
+ ccfs0 = oper->center_freq_seg0_idx;
+ ccfs1 = oper->center_freq_seg1_idx;
+ ccfs2 = (le16_to_cpu(htop->operation_mode) &
+ IEEE80211_HT_OP_MODE_CCFS2_MASK)
+ >> IEEE80211_HT_OP_MODE_CCFS2_SHIFT;
+
+ /* when parsing (and we know how to) CCFS1 and CCFS2 are equivalent */
+ ccf0 = ccfs0;
+ ccf1 = ccfs1;
+ if (!ccfs1 && ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW))
+ ccf1 = ccfs2;
+
+ cf0 = ieee80211_channel_to_frequency(ccf0, chandef->chan->band);
+ cf1 = ieee80211_channel_to_frequency(ccf1, chandef->chan->band);
switch (oper->chan_width) {
case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ /* just use HT information directly */
break;
case IEEE80211_VHT_CHANWIDTH_80MHZ:
new.width = NL80211_CHAN_WIDTH_80;
- new.center_freq1 = cf1;
+ new.center_freq1 = cf0;
/* If needed, adjust based on the newer interop workaround. */
- if (oper->center_freq_seg1_idx) {
+ if (ccf1) {
unsigned int diff;
- diff = abs(oper->center_freq_seg1_idx -
- oper->center_freq_seg0_idx);
+ diff = abs(ccf1 - ccf0);
if (diff == 8) {
new.width = NL80211_CHAN_WIDTH_160;
- new.center_freq1 = cf2;
+ new.center_freq1 = cf1;
} else if (diff > 8) {
new.width = NL80211_CHAN_WIDTH_80P80;
- new.center_freq2 = cf2;
+ new.center_freq2 = cf1;
}
}
break;
case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ /* deprecated encoding */
new.width = NL80211_CHAN_WIDTH_160;
- new.center_freq1 = cf1;
+ new.center_freq1 = cf0;
break;
case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ /* deprecated encoding */
new.width = NL80211_CHAN_WIDTH_80P80;
- new.center_freq1 = cf1;
- new.center_freq2 = cf2;
+ new.center_freq1 = cf0;
+ new.center_freq2 = cf1;
break;
default:
return false;
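
The rewritten decoder treats the two segment indices as channel numbers 5 MHz apart: in the 80 MHz encoding, a second segment index differing from the first by exactly 8 (40 MHz) describes a contiguous 160 MHz channel, while a larger difference means 80+80. A standalone model of just that branch; the enum and helper are illustrative stand-ins, not the cfg80211 types:

#include <stdio.h>
#include <stdlib.h>

enum width_model { W_80, W_160, W_80P80 };

/* ccf0/ccf1 are channel-center segment indices (5 MHz per step), as fed
 * into the IEEE80211_VHT_CHANWIDTH_80MHZ case above.
 */
static enum width_model vht80_upgrade(int ccf0, int ccf1)
{
	if (!ccf1)
		return W_80;		/* plain 80 MHz */
	if (abs(ccf1 - ccf0) == 8)
		return W_160;		/* halves adjacent: contiguous 160 */
	if (abs(ccf1 - ccf0) > 8)
		return W_80P80;		/* non-adjacent: 80+80 */
	return W_80;
}

int main(void)
{
	/* Channel 42 (5210 MHz) plus seg1 at 50 (5250 MHz): 160 MHz. */
	printf("%d\n", vht80_upgrade(42, 50) == W_160);
	return 0;
}
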
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 259325cbcc31..006d82e4a397 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -3,6 +3,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -231,6 +232,13 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
sizeof(struct ieee80211_vht_mcs_info));
+ /* copy EXT_NSS_BW Support value or remove the capability */
+ if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
+ else
+ vht_cap->vht_mcs.tx_highest &=
+ ~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
/* but also restrict MCSes */
for (i = 0; i < 8; i++) {
u16 own_rx, own_tx, peer_rx, peer_tx;
@@ -294,6 +302,18 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
break;
default:
sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+
+ if (!(vht_cap->vht_mcs.tx_highest &
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
+ break;
+
+ /*
+ * If this is non-zero, then it does support 160 MHz after all,
+ * in one form or the other. We don't distinguish here (or even
+ * above) between 160 and 80+80 yet.
+ */
+ if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
+ sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
}
sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
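
The capability copy above is one-way: when the local hardware does not advertise SUPPORTS_VHT_EXT_NSS_BW, the peer's "EXT NSS BW capable" bit in tx_highest is cleared so no later bandwidth upgrade can key off it. A small model of that sanitization; the bit position matches the IEEE80211_VHT_EXT_NSS_BW_CAPABLE definition but is restated here as an assumption:

#include <stdbool.h>
#include <stdint.h>

#define EXT_NSS_BW_CAPABLE (1u << 13)	/* bit in vht_mcs.tx_highest */

static uint16_t sanitize_tx_highest(uint16_t tx_highest, bool hw_ext_nss)
{
	if (!hw_ext_nss)
		tx_highest &= ~EXT_NSS_BW_CAPABLE;
	return tx_highest;
}
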
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 45f33d6dedf7..32cb7751d216 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -12,7 +12,6 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
-#include <linux/module.h>
#include <net/genetlink.h>
#include <net/ncsi.h>
#include <linux/skbuff.h>
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index d33094f4ec41..43041f087eb3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -765,7 +765,7 @@ __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
return ret;
}
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
entry_seg = nf_queue_entry_dup(entry);
if (entry_seg) {
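
This is one of many conversions in this section (see also rxrpc, tipc and several qdiscs below) from open-coded `skb->next = NULL` to skb_mark_not_on_list(). The helper is presumably the trivial inline below; the value of the churn is that every list unlink goes through one greppable point ahead of further skb-list rework:

/* Presumed shape of the helper in include/linux/skbuff.h. */
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}
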
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 930d17fa906c..e3a0538ec0be 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -574,11 +574,6 @@ static int netlink_insert(struct sock *sk, u32 portid)
if (nlk_sk(sk)->bound)
goto err;
- err = -ENOMEM;
- if (BITS_PER_LONG > 32 &&
- unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
- goto err;
-
nlk_sk(sk)->portid = portid;
sock_hold(sk);
@@ -993,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
int err = 0;
- long unsigned int groups = nladdr->nl_groups;
+ unsigned long groups = nladdr->nl_groups;
bool bound;
if (addr_len < sizeof(struct sockaddr_nl))
@@ -1011,9 +1006,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
return err;
}
- if (nlk->ngroups == 0)
- groups = 0;
- else if (nlk->ngroups < 8*sizeof(groups))
+ if (nlk->ngroups < BITS_PER_LONG)
groups &= (1UL << nlk->ngroups) - 1;
bound = nlk->bound;
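
With the type fixed to unsigned long, the group mask can be computed against BITS_PER_LONG with no zero-group special case: a shift by ngroups < BITS_PER_LONG is always defined, and ngroups == 0 naturally yields an empty mask. A standalone model:

#include <stdio.h>

static unsigned long mask_groups(unsigned long groups, unsigned int ngroups)
{
	if (ngroups < 8 * sizeof(unsigned long))	/* BITS_PER_LONG */
		groups &= (1UL << ngroups) - 1;
	return groups;
}

int main(void)
{
	/* Only 32 groups known: bit 40 is silently dropped, low bits kept. */
	printf("%#lx\n", mask_groups((1UL << 40) | 0x5UL, 32));	/* 0x5 */
	return 0;
}
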
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 56b8e7167790..35966da84769 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -254,21 +254,18 @@ static bool icmphdr_ok(struct sk_buff *skb)
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
+ unsigned short frag_off;
+ unsigned int payload_ofs = 0;
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int nh_len;
- int payload_ofs;
struct ipv6hdr *nh;
- uint8_t nexthdr;
- __be16 frag_off;
- int err;
+ int err, nexthdr, flags = 0;
err = check_header(skb, nh_ofs + sizeof(*nh));
if (unlikely(err))
return err;
nh = ipv6_hdr(skb);
- nexthdr = nh->nexthdr;
- payload_ofs = (u8 *)(nh + 1) - skb->data;
key->ip.proto = NEXTHDR_NONE;
key->ip.tos = ipv6_get_dsfield(nh);
@@ -277,10 +274,9 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
key->ipv6.addr.src = nh->saddr;
key->ipv6.addr.dst = nh->daddr;
- payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
-
- if (frag_off) {
- if (frag_off & htons(~0x7))
+ nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
+ if (flags & IP6_FH_F_FRAG) {
+ if (frag_off)
key->ip.frag = OVS_FRAG_TYPE_LATER;
else
key->ip.frag = OVS_FRAG_TYPE_FIRST;
@@ -288,11 +284,11 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
key->ip.frag = OVS_FRAG_TYPE_NONE;
}
- /* Delayed handling of error in ipv6_skip_exthdr() as it
- * always sets frag_off to a valid value which may be
+ /* Delayed handling of error in ipv6_find_hdr() as it
+ * always sets flags and frag_off to a valid value which may be
* used to set key->ip.frag above.
*/
- if (unlikely(payload_ofs < 0))
+ if (unlikely(nexthdr < 0))
return -EPROTO;
nh_len = payload_ofs - nh_ofs;
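
The rewritten parser classifies fragments from the ipv6_find_hdr()-style outputs instead of inferring them from a raw frag_off: the IP6_FH_F_FRAG flag says a fragment header was seen, and a zero offset then distinguishes the first fragment from later ones. A compact model of that decision table:

enum frag_model { FRAG_NONE, FRAG_FIRST, FRAG_LATER };

/* saw_frag_hdr models (flags & IP6_FH_F_FRAG); frag_off is the fragment
 * offset with flag bits already stripped, as the new helper reports it.
 */
static enum frag_model classify_frag(int saw_frag_hdr, unsigned short frag_off)
{
	if (!saw_frag_hdr)
		return FRAG_NONE;
	return frag_off ? FRAG_LATER : FRAG_FIRST;
}
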
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 75c92a87e7b2..f85f67b5c1f4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3805,6 +3805,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return fanout_set_data(po, optval, optlen);
}
+ case PACKET_IGNORE_OUTGOING:
+ {
+ int val;
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ po->prot_hook.ignore_outgoing = !!val;
+ return 0;
+ }
case PACKET_TX_HAS_OFF:
{
unsigned int val;
@@ -3928,6 +3942,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
((u32)po->fanout->flags << 24)) :
0);
break;
+ case PACKET_IGNORE_OUTGOING:
+ val = po->prot_hook.ignore_outgoing;
+ break;
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
return -EINVAL;
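
From userspace the new option is a plain boolean sockopt on a packet socket (CAP_NET_RAW required). A minimal example; the fallback definition of PACKET_IGNORE_OUTGOING (23) mirrors this series' uapi assignment and is an assumption that only matters on headers predating it:

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

#ifndef PACKET_IGNORE_OUTGOING
#define PACKET_IGNORE_OUTGOING 23
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_PACKET, PACKET_IGNORE_OUTGOING,
				 &one, sizeof(one)) < 0) {
		perror("PACKET_IGNORE_OUTGOING");
		return 1;
	}
	/* This socket now delivers only ingress frames. */
	return 0;
}
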
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c4dcf654d8fe..6bfaf05b63b2 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -278,7 +278,7 @@ struct rds_incoming {
struct in6_addr i_saddr;
rds_rdma_cookie_t i_rdma_cookie;
- struct timeval i_rx_tstamp;
+ ktime_t i_rx_tstamp;
u64 i_rx_lat_trace[RDS_RX_MAX_TRACES];
};
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 504cd6bcc54c..727639dac8a7 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -43,18 +43,14 @@
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
struct in6_addr *saddr)
{
- int i;
-
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = conn;
inc->i_saddr = *saddr;
inc->i_rdma_cookie = 0;
- inc->i_rx_tstamp.tv_sec = 0;
- inc->i_rx_tstamp.tv_usec = 0;
+ inc->i_rx_tstamp = ktime_set(0, 0);
- for (i = 0; i < RDS_RX_MAX_TRACES; i++)
- inc->i_rx_lat_trace[i] = 0;
+ memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace));
}
EXPORT_SYMBOL_GPL(rds_inc_init);
@@ -67,8 +63,7 @@ void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
inc->i_conn_path = cp;
inc->i_saddr = *saddr;
inc->i_rdma_cookie = 0;
- inc->i_rx_tstamp.tv_sec = 0;
- inc->i_rx_tstamp.tv_usec = 0;
+ inc->i_rx_tstamp = ktime_set(0, 0);
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
@@ -385,7 +380,7 @@ void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
if (sock_flag(sk, SOCK_RCVTSTAMP))
- do_gettimeofday(&inc->i_rx_tstamp);
+ inc->i_rx_tstamp = ktime_get_real();
rds_inc_addref(inc);
inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
list_add_tail(&inc->i_item, &rs->rs_recv_queue);
@@ -552,11 +547,11 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
goto out;
}
- if ((inc->i_rx_tstamp.tv_sec != 0) &&
+ if ((inc->i_rx_tstamp != 0) &&
sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
+ struct timeval tv = ktime_to_timeval(inc->i_rx_tstamp);
ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
- sizeof(struct timeval),
- &inc->i_rx_tstamp);
+ sizeof(tv), &tv);
if (ret)
goto out;
}
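
The timestamp now lives as a ktime_t (signed 64-bit nanoseconds) for its whole kernel lifetime and is converted to a timeval only at the SCM_TIMESTAMP boundary, which also retires the y2038-unfriendly do_gettimeofday(). The conversion is plain integer arithmetic; a standalone model:

#include <stdio.h>

/* ktime_t is a signed 64-bit nanosecond count; the new code converts it
 * to seconds/microseconds only when emitting the control message.
 */
struct timeval_model { long long tv_sec, tv_usec; };

static struct timeval_model ns_to_timeval(long long ns)
{
	struct timeval_model tv = {
		.tv_sec  = ns / 1000000000LL,
		.tv_usec = (ns % 1000000000LL) / 1000LL,
	};
	return tv;
}

int main(void)
{
	struct timeval_model tv = ns_to_timeval(1234567891234LL);
	printf("%lld.%06lld\n", tv.tv_sec, tv.tv_usec);	/* 1234.567891 */
	return 0;
}
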
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 1355f5ca8d22..abca57040f37 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -510,8 +510,8 @@ void rfkill_remove_epo_lock(void)
/**
* rfkill_is_epo_lock_active - returns true EPO is active
*
- * Returns 0 (false) if there is NOT an active EPO contidion,
- * and 1 (true) if there is an active EPO contition, which
+ * Returns 0 (false) if there is NOT an active EPO condition,
+ * and 1 (true) if there is an active EPO condition, which
* locks all radios in one of the BLOCKED states.
*
* Can be called in atomic context.
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index cfdc199c6351..ee8e7e1d5c0f 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -259,7 +259,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
while (list) {
skb = list;
list = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
}
}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index e12f8ef7baa4..6f118d62c731 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1073,12 +1073,14 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
err = -EINVAL;
ops = tc_lookup_action(tb[TCA_ACT_KIND]);
if (!ops) { /* could happen in batch of actions */
- NL_SET_ERR_MSG(extack, "Specified TC action not found");
+ NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
goto err_out;
}
err = -ENOENT;
- if (ops->lookup(net, &a, index, extack) == 0)
+ if (ops->lookup(net, &a, index) == 0) {
+ NL_SET_ERR_MSG(extack, "TC action with specified index not found");
goto err_mod;
+ }
module_put(ops->owner);
return a;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 0c68bc9cf0b4..c7633843e223 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -387,8 +387,7 @@ static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 6f0f273f1139..8475913f2070 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -143,8 +143,10 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
return -EEXIST;
}
/* replacing action and zone */
+ spin_lock_bh(&ci->tcf_lock);
ci->tcf_action = parm->action;
ci->zone = parm->zone;
+ spin_unlock_bh(&ci->tcf_lock);
ret = 0;
}
@@ -156,16 +158,16 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_connmark_info *ci = to_connmark(a);
-
struct tc_connmark opt = {
.index = ci->tcf_index,
.refcnt = refcount_read(&ci->tcf_refcnt) - ref,
.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
- .action = ci->tcf_action,
- .zone = ci->zone,
};
struct tcf_t t;
+ spin_lock_bh(&ci->tcf_lock);
+ opt.action = ci->tcf_action;
+ opt.zone = ci->zone;
if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -173,9 +175,12 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
TCA_CONNMARK_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&ci->tcf_lock);
return skb->len;
+
nla_put_failure:
+ spin_unlock_bh(&ci->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
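
The dump conversion here (repeated for act_nat below) follows one pattern: invariant fields go into the netlink blob at initializer time, mutable ones are snapshotted under tcf_lock so a concurrent rtnl-free init() can never be observed half-applied, and both the success and failure exits release the lock. A userspace model of the snapshot step, using a pthread mutex as a stand-in for the action spinlock:

#include <pthread.h>

struct act_model {
	pthread_mutex_t lock;
	int action;
	int zone;
};

struct dump_opt { int action; int zone; };

/* Copy the mutable fields atomically with respect to updates. */
static struct dump_opt snapshot(struct act_model *a)
{
	struct dump_opt opt;

	pthread_mutex_lock(&a->lock);
	opt.action = a->action;
	opt.zone = a->zone;
	pthread_mutex_unlock(&a->lock);
	return opt;
}
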
@@ -190,8 +195,7 @@ static int tcf_connmark_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index b8a67ae3105a..3dc25b7806d7 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -646,8 +646,7 @@ static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index cd1d9bd32ef9..aa44d14b43c7 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -222,8 +222,7 @@ static int tcf_gact_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 06a3d4801878..30b63fa23ee2 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -855,8 +855,7 @@ static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 23273b5303fd..1efbfb10b1fc 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -329,8 +329,7 @@ static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, ipt_net_id);
@@ -379,8 +378,7 @@ static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, xt_net_id);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 8bf66d0a6800..a9d64bfe5a2a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -338,8 +338,7 @@ static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 4313aa102440..c5c1e23add77 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -256,28 +256,31 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
unsigned char *b = skb_tail_pointer(skb);
struct tcf_nat *p = to_tcf_nat(a);
struct tc_nat opt = {
- .old_addr = p->old_addr,
- .new_addr = p->new_addr,
- .mask = p->mask,
- .flags = p->flags,
-
.index = p->tcf_index,
- .action = p->tcf_action,
.refcnt = refcount_read(&p->tcf_refcnt) - ref,
.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock_bh(&p->tcf_lock);
+ opt.old_addr = p->old_addr;
+ opt.new_addr = p->new_addr;
+ opt.mask = p->mask;
+ opt.flags = p->flags;
+ opt.action = p->tcf_action;
+
if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&p->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&p->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -292,8 +295,7 @@ static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ad99a99f11f6..da3dd0f68cc2 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -460,8 +460,7 @@ static int tcf_pedit_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 5d8bfa878477..92649d2667ed 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,7 @@
#include <net/act_api.h>
#include <net/netlink.h>
-struct tcf_police {
- struct tc_action common;
+struct tcf_police_params {
int tcfp_result;
u32 tcfp_ewma_rate;
s64 tcfp_burst;
@@ -36,6 +35,12 @@ struct tcf_police {
bool rate_present;
struct psched_ratecfg peak;
bool peak_present;
+ struct rcu_head rcu;
+};
+
+struct tcf_police {
+ struct tc_action common;
+ struct tcf_police_params __rcu *params;
};
#define to_police(pc) ((struct tcf_police *)pc)
@@ -84,6 +89,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
struct tc_action_net *tn = net_generic(net, police_net_id);
+ struct tcf_police_params *new;
bool exists = false;
int size;
@@ -110,7 +116,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, parm->index, NULL, a,
- &act_police_ops, bind, false);
+ &act_police_ops, bind, true);
if (ret) {
tcf_idr_cleanup(tn, parm->index);
return ret;
@@ -137,7 +143,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (est) {
- err = gen_replace_estimator(&police->tcf_bstats, NULL,
+ err = gen_replace_estimator(&police->tcf_bstats,
+ police->common.cpu_bstats,
&police->tcf_rate_est,
&police->tcf_lock,
NULL, est);
@@ -150,50 +157,60 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
goto failure;
}
- spin_lock_bh(&police->tcf_lock);
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (unlikely(!new)) {
+ err = -ENOMEM;
+ goto failure;
+ }
+
/* No failure allowed after this point */
- police->tcfp_mtu = parm->mtu;
- if (police->tcfp_mtu == 0) {
- police->tcfp_mtu = ~0;
+ new->tcfp_mtu = parm->mtu;
+ if (!new->tcfp_mtu) {
+ new->tcfp_mtu = ~0;
if (R_tab)
- police->tcfp_mtu = 255 << R_tab->rate.cell_log;
+ new->tcfp_mtu = 255 << R_tab->rate.cell_log;
}
if (R_tab) {
- police->rate_present = true;
- psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
+ new->rate_present = true;
+ psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0);
qdisc_put_rtab(R_tab);
} else {
- police->rate_present = false;
+ new->rate_present = false;
}
if (P_tab) {
- police->peak_present = true;
- psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
+ new->peak_present = true;
+ psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0);
qdisc_put_rtab(P_tab);
} else {
- police->peak_present = false;
+ new->peak_present = false;
}
if (tb[TCA_POLICE_RESULT])
- police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
- police->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
- police->tcfp_toks = police->tcfp_burst;
- if (police->peak_present) {
- police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak,
- police->tcfp_mtu);
- police->tcfp_ptoks = police->tcfp_mtu_ptoks;
+ new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
+ new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
+ new->tcfp_toks = new->tcfp_burst;
+ if (new->peak_present) {
+ new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
+ new->tcfp_mtu);
+ new->tcfp_ptoks = new->tcfp_mtu_ptoks;
}
- police->tcf_action = parm->action;
if (tb[TCA_POLICE_AVRATE])
- police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
+ new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
+ spin_lock_bh(&police->tcf_lock);
+ new->tcfp_t_c = ktime_get_ns();
+ police->tcf_action = parm->action;
+ rcu_swap_protected(police->params,
+ new,
+ lockdep_is_held(&police->tcf_lock));
spin_unlock_bh(&police->tcf_lock);
- if (ret != ACT_P_CREATED)
- return ret;
- police->tcfp_t_c = ktime_get_ns();
- tcf_idr_insert(tn, *a);
+ if (new)
+ kfree_rcu(new, rcu);
+ if (ret == ACT_P_CREATED)
+ tcf_idr_insert(tn, *a);
return ret;
failure:
@@ -207,64 +224,69 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_police *police = to_police(a);
- s64 now;
- s64 toks;
- s64 ptoks = 0;
-
- spin_lock(&police->tcf_lock);
+ struct tcf_police_params *p;
+ s64 now, toks, ptoks = 0;
+ int ret;
- bstats_update(&police->tcf_bstats, skb);
tcf_lastuse_update(&police->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
- if (police->tcfp_ewma_rate) {
+ ret = READ_ONCE(police->tcf_action);
+ p = rcu_dereference_bh(police->params);
+
+ if (p->tcfp_ewma_rate) {
struct gnet_stats_rate_est64 sample;
if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
- sample.bps >= police->tcfp_ewma_rate) {
- police->tcf_qstats.overlimits++;
- if (police->tcf_action == TC_ACT_SHOT)
- police->tcf_qstats.drops++;
- spin_unlock(&police->tcf_lock);
- return police->tcf_action;
- }
+ sample.bps >= p->tcfp_ewma_rate)
+ goto inc_overlimits;
}
- if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
- if (!police->rate_present) {
- spin_unlock(&police->tcf_lock);
- return police->tcfp_result;
+ if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
+ if (!p->rate_present) {
+ ret = p->tcfp_result;
+ goto end;
}
now = ktime_get_ns();
- toks = min_t(s64, now - police->tcfp_t_c,
- police->tcfp_burst);
- if (police->peak_present) {
- ptoks = toks + police->tcfp_ptoks;
- if (ptoks > police->tcfp_mtu_ptoks)
- ptoks = police->tcfp_mtu_ptoks;
- ptoks -= (s64) psched_l2t_ns(&police->peak,
- qdisc_pkt_len(skb));
+ toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst);
+ if (p->peak_present) {
+ ptoks = toks + p->tcfp_ptoks;
+ if (ptoks > p->tcfp_mtu_ptoks)
+ ptoks = p->tcfp_mtu_ptoks;
+ ptoks -= (s64)psched_l2t_ns(&p->peak,
+ qdisc_pkt_len(skb));
}
- toks += police->tcfp_toks;
- if (toks > police->tcfp_burst)
- toks = police->tcfp_burst;
- toks -= (s64) psched_l2t_ns(&police->rate, qdisc_pkt_len(skb));
+ toks += p->tcfp_toks;
+ if (toks > p->tcfp_burst)
+ toks = p->tcfp_burst;
+ toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
if ((toks|ptoks) >= 0) {
- police->tcfp_t_c = now;
- police->tcfp_toks = toks;
- police->tcfp_ptoks = ptoks;
- if (police->tcfp_result == TC_ACT_SHOT)
- police->tcf_qstats.drops++;
- spin_unlock(&police->tcf_lock);
- return police->tcfp_result;
+ p->tcfp_t_c = now;
+ p->tcfp_toks = toks;
+ p->tcfp_ptoks = ptoks;
+ ret = p->tcfp_result;
+ goto inc_drops;
}
}
- police->tcf_qstats.overlimits++;
- if (police->tcf_action == TC_ACT_SHOT)
- police->tcf_qstats.drops++;
- spin_unlock(&police->tcf_lock);
- return police->tcf_action;
+inc_overlimits:
+ qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
+inc_drops:
+ if (ret == TC_ACT_SHOT)
+ qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
+end:
+ return ret;
+}
+
+static void tcf_police_cleanup(struct tc_action *a)
+{
+ struct tcf_police *police = to_police(a);
+ struct tcf_police_params *p;
+
+ p = rcu_dereference_protected(police->params, 1);
+ if (p)
+ kfree_rcu(p, rcu);
}
static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
@@ -272,6 +294,7 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = to_police(a);
+ struct tcf_police_params *p;
struct tc_police opt = {
.index = police->tcf_index,
.refcnt = refcount_read(&police->tcf_refcnt) - ref,
@@ -281,19 +304,21 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
spin_lock_bh(&police->tcf_lock);
opt.action = police->tcf_action;
- opt.mtu = police->tcfp_mtu;
- opt.burst = PSCHED_NS2TICKS(police->tcfp_burst);
- if (police->rate_present)
- psched_ratecfg_getrate(&opt.rate, &police->rate);
- if (police->peak_present)
- psched_ratecfg_getrate(&opt.peakrate, &police->peak);
+ p = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ opt.mtu = p->tcfp_mtu;
+ opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
+ if (p->rate_present)
+ psched_ratecfg_getrate(&opt.rate, &p->rate);
+ if (p->peak_present)
+ psched_ratecfg_getrate(&opt.peakrate, &p->peak);
if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
goto nla_put_failure;
- if (police->tcfp_result &&
- nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
+ if (p->tcfp_result &&
+ nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
goto nla_put_failure;
- if (police->tcfp_ewma_rate &&
- nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
+ if (p->tcfp_ewma_rate &&
+ nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
@@ -312,8 +337,7 @@ nla_put_failure:
return -1;
}
-static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, police_net_id);
@@ -333,6 +357,7 @@ static struct tc_action_ops act_police_ops = {
.init = tcf_police_init,
.walk = tcf_police_walker,
.lookup = tcf_police_search,
+ .cleanup = tcf_police_cleanup,
.size = sizeof(struct tcf_police),
};
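
Behind the RCU conversion the policer is still a token bucket: each packet credits the time elapsed since the last conforming packet (capped at the burst), debits its own transmit time at the configured rate, and conforms only if the balance stays non-negative; state is written back only on conformance. A single-rate standalone model (the peak bucket in the hunk above is the same scheme run in parallel):

/* toks is accumulated credit in nanoseconds; l2t(len) is the time a
 * packet of that length takes at the configured rate.
 */
struct tb { long long toks, burst, t_c; };

static long long l2t(int len)		/* e.g. 1 Mbit/s: 8000 ns/byte */
{
	return (long long)len * 8000;
}

static int conforms(struct tb *tb, long long now, int len)
{
	long long toks = now - tb->t_c;

	if (toks > tb->burst)
		toks = tb->burst;
	toks += tb->toks;
	if (toks > tb->burst)
		toks = tb->burst;
	toks -= l2t(len);
	if (toks < 0)
		return 0;		/* over rate: overlimit */
	tb->t_c = now;			/* write back only when conforming */
	tb->toks = toks;
	return 1;
}

int main(void)
{
	struct tb tb = { .toks = 0, .burst = 10000000, .t_c = 0 };

	return !conforms(&tb, 1000000, 100);	/* 100 B at t=1 ms: conforms */
}
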
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 6b67aa13d2dd..1a0c682fd734 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -224,8 +224,7 @@ static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 52400d49f81f..902957beceb3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -188,8 +188,7 @@ static int tcf_simp_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 73e44ce2a883..64dba3708fce 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,7 +99,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
- struct tcf_skbedit_params *params_old, *params_new;
+ struct tcf_skbedit_params *params_new;
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tc_skbedit *parm;
struct tcf_skbedit *d;
@@ -187,8 +187,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
}
}
- ASSERT_RTNL();
-
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
if (ret == ACT_P_CREATED)
@@ -210,11 +208,13 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (flags & SKBEDIT_F_MASK)
params_new->mask = *mask;
+ spin_lock_bh(&d->tcf_lock);
d->tcf_action = parm->action;
- params_old = rtnl_dereference(d->params);
- rcu_assign_pointer(d->params, params_new);
- if (params_old)
- kfree_rcu(params_old, rcu);
+ rcu_swap_protected(d->params, params_new,
+ lockdep_is_held(&d->tcf_lock));
+ spin_unlock_bh(&d->tcf_lock);
+ if (params_new)
+ kfree_rcu(params_new, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -231,12 +231,14 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
.index = d->tcf_index,
.refcnt = refcount_read(&d->tcf_refcnt) - ref,
.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
- .action = d->tcf_action,
};
u64 pure_flags = 0;
struct tcf_t t;
- params = rtnl_dereference(d->params);
+ spin_lock_bh(&d->tcf_lock);
+ params = rcu_dereference_protected(d->params,
+ lockdep_is_held(&d->tcf_lock));
+ opt.action = d->tcf_action;
if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -264,9 +266,12 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &d->tcf_tm);
if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&d->tcf_lock);
+
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&d->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -291,8 +296,7 @@ static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
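
rcu_swap_protected() is what lets the same local variable serve double duty above: the new params pointer is published with RCU semantics and the old one comes back in params_new, ready for kfree_rcu(). Its presumed shape, matching include/linux/rcupdate.h at the time of this series:

#define rcu_swap_protected(rcu_ptr, ptr, c) do {			\
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	(ptr) = __tmp;							\
} while (0)
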
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 588077fafd6c..59710a183bd3 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -251,8 +251,7 @@ static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 681f6f04e7da..4cca8f274662 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -548,8 +548,7 @@ static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 033d273afe50..ba677d54a7af 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -288,8 +288,7 @@ static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6fd9bdd93796..4b8dd37dd4f8 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -98,7 +98,7 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
- unsigned int in_hw_count;
+ u32 in_hw_count;
struct rcu_work rwork;
struct net_device *hw_dev;
};
@@ -1880,6 +1880,9 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
+ goto nla_put_failure;
+
if (tcf_exts_dump(skb, &f->exts))
goto nla_put_failure;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 98541c6399db..411c40344b77 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -27,7 +27,6 @@
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
-#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
@@ -1053,10 +1052,6 @@ static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
return 0;
}
-/* lockdep annotation is needed for ingress; egress gets it only for name */
-static struct lock_class_key qdisc_tx_lock;
-static struct lock_class_key qdisc_rx_lock;
-
/*
Allocate and initialize new qdisc.
@@ -1121,7 +1116,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
if (handle == TC_H_INGRESS) {
sch->flags |= TCQ_F_INGRESS;
handle = TC_H_MAKE(TC_H_INGRESS, 0);
- lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
} else {
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
@@ -1129,7 +1123,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
if (handle == 0)
goto err_out3;
}
- lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
if (!netif_is_multiqueue(dev))
sch->flags |= TCQ_F_ONETXQUEUE;
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c07c30b916d5..dc539295ae65 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -812,7 +812,7 @@ static struct sk_buff *dequeue_head(struct cake_flow *flow)
if (skb) {
flow->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
return skb;
@@ -1252,7 +1252,7 @@ found:
else
flow->head = elig_ack->next;
- elig_ack->next = NULL;
+ skb_mark_not_on_list(elig_ack);
return elig_ack;
}
@@ -1675,7 +1675,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
while (segs) {
nskb = segs->next;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
cobalt_set_enqueue_time(segs, now);
get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 4808713c73b9..b27ba36a269c 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -319,7 +319,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
if (skb) {
flow->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
flow->qlen--;
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6c0a9d5dbf94..cd04d40c30b6 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -124,7 +124,7 @@ static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
struct sk_buff *skb = flow->head;
flow->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
return skb;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69078c82963e..a64132a5db36 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -184,7 +184,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
skb = nskb;
(*packets)++; /* GSO counts as one pkt */
}
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
/* This variant of try_bulk_dequeue_skb() makes sure
@@ -210,7 +210,7 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
skb = nskb;
} while (++cnt < 8);
(*packets) += cnt;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index c3a8388dcdf6..9d6a47697406 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -330,7 +330,7 @@ static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
struct sk_buff *skb = bucket->head;
bucket->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
return skb;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 43c4bfe625a9..18ac2d6ca294 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -132,7 +132,7 @@ struct htb_class {
struct htb_class_inner {
struct htb_prio clprio[TC_HTB_NUMPRIO];
} inner;
- } un;
+ };
s64 pq_key;
int prio_activity; /* for which prios are we active */
@@ -411,13 +411,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
int prio = ffz(~m);
m &= ~(1 << prio);
- if (p->un.inner.clprio[prio].feed.rb_node)
+ if (p->inner.clprio[prio].feed.rb_node)
/* parent already has its feed in use so that
* reset bit in mask as parent is already ok
*/
mask &= ~(1 << prio);
- htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
+ htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
}
p->prio_activity |= mask;
cl = p;
@@ -447,19 +447,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
int prio = ffz(~m);
m &= ~(1 << prio);
- if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
+ if (p->inner.clprio[prio].ptr == cl->node + prio) {
/* we are removing child which is pointed to from
* parent feed - forget the pointer but remember
* classid
*/
- p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
- p->un.inner.clprio[prio].ptr = NULL;
+ p->inner.clprio[prio].last_ptr_id = cl->common.classid;
+ p->inner.clprio[prio].ptr = NULL;
}
htb_safe_rb_erase(cl->node + prio,
- &p->un.inner.clprio[prio].feed);
+ &p->inner.clprio[prio].feed);
- if (!p->un.inner.clprio[prio].feed.rb_node)
+ if (!p->inner.clprio[prio].feed.rb_node)
mask |= 1 << prio;
}
@@ -555,7 +555,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
- WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
+ WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
if (!cl->prio_activity) {
cl->prio_activity = 1 << cl->prio;
@@ -577,22 +577,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
cl->prio_activity = 0;
}
-static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct qdisc_skb_head *qh)
-{
- struct sk_buff *last = qh->tail;
-
- if (last) {
- skb->next = NULL;
- last->next = skb;
- qh->tail = skb;
- } else {
- qh->tail = skb;
- qh->head = skb;
- }
- qh->qlen++;
-}
-
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -603,7 +587,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (cl == HTB_DIRECT) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen) {
- htb_enqueue_tail(skb, sch, &q->direct_queue);
+ __qdisc_enqueue_tail(skb, &q->direct_queue);
q->direct_pkts++;
} else {
return qdisc_drop(skb, sch, to_free);
@@ -615,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
__qdisc_drop(skb, to_free);
return ret;
#endif
- } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
+ } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
to_free)) != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) {
qdisc_qstats_drop(sch);
@@ -823,7 +807,7 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
if (!cl->level)
return cl;
- clp = &cl->un.inner.clprio[prio];
+ clp = &cl->inner.clprio[prio];
(++sp)->root = clp->feed.rb_node;
sp->pptr = &clp->ptr;
sp->pid = &clp->last_ptr_id;
@@ -857,7 +841,7 @@ next:
* graft operation on the leaf since last dequeue;
* simply deactivate and skip such class
*/
- if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
+ if (unlikely(cl->leaf.q->q.qlen == 0)) {
struct htb_class *next;
htb_deactivate(q, cl);
@@ -873,12 +857,12 @@ next:
goto next;
}
- skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+ skb = cl->leaf.q->dequeue(cl->leaf.q);
if (likely(skb != NULL))
break;
- qdisc_warn_nonwc("htb", cl->un.leaf.q);
- htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
+ qdisc_warn_nonwc("htb", cl->leaf.q);
+ htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
&q->hlevel[0].hprio[prio].ptr);
cl = htb_lookup_leaf(hprio, prio);
@@ -886,16 +870,16 @@ next:
if (likely(skb != NULL)) {
bstats_update(&cl->bstats, skb);
- cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
- if (cl->un.leaf.deficit[level] < 0) {
- cl->un.leaf.deficit[level] += cl->quantum;
- htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+ cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
+ if (cl->leaf.deficit[level] < 0) {
+ cl->leaf.deficit[level] += cl->quantum;
+ htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
&q->hlevel[0].hprio[prio].ptr);
}
/* this used to be after charge_class but this constellation
* gives us slightly better performance
*/
- if (!cl->un.leaf.q->q.qlen)
+ if (!cl->leaf.q->q.qlen)
htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb);
}
@@ -972,10 +956,10 @@ static void htb_reset(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->level)
- memset(&cl->un.inner, 0, sizeof(cl->un.inner));
+ memset(&cl->inner, 0, sizeof(cl->inner));
else {
- if (cl->un.leaf.q)
- qdisc_reset(cl->un.leaf.q);
+ if (cl->leaf.q)
+ qdisc_reset(cl->leaf.q);
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
@@ -1098,8 +1082,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
*/
tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
- if (!cl->level && cl->un.leaf.q)
- tcm->tcm_info = cl->un.leaf.q->handle;
+ if (!cl->level && cl->leaf.q)
+ tcm->tcm_info = cl->leaf.q->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
@@ -1142,9 +1126,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
};
__u32 qlen = 0;
- if (!cl->level && cl->un.leaf.q) {
- qlen = cl->un.leaf.q->q.qlen;
- qs.backlog = cl->un.leaf.q->qstats.backlog;
+ if (!cl->level && cl->leaf.q) {
+ qlen = cl->leaf.q->q.qlen;
+ qs.backlog = cl->leaf.q->qstats.backlog;
}
cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
INT_MIN, INT_MAX);
@@ -1172,14 +1156,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
cl->common.classid, extack)) == NULL)
return -ENOBUFS;
- *old = qdisc_replace(sch, new, &cl->un.leaf.q);
+ *old = qdisc_replace(sch, new, &cl->leaf.q);
return 0;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
- return !cl->level ? cl->un.leaf.q : NULL;
+ return !cl->level ? cl->leaf.q : NULL;
}
static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1205,15 +1189,15 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
{
struct htb_class *parent = cl->parent;
- WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
+ WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
if (parent->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&parent->pq_node,
&q->hlevel[parent->level].wait_pq);
parent->level = 0;
- memset(&parent->un.inner, 0, sizeof(parent->un.inner));
- parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
+ memset(&parent->inner, 0, sizeof(parent->inner));
+ parent->leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
parent->t_c = ktime_get_ns();
@@ -1223,8 +1207,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
if (!cl->level) {
- WARN_ON(!cl->un.leaf.q);
- qdisc_destroy(cl->un.leaf.q);
+ WARN_ON(!cl->leaf.q);
+ qdisc_destroy(cl->leaf.q);
}
gen_kill_estimator(&cl->rate_est);
tcf_block_put(cl->block);
@@ -1286,11 +1270,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
if (!cl->level) {
- unsigned int qlen = cl->un.leaf.q->q.qlen;
- unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+ unsigned int qlen = cl->leaf.q->q.qlen;
+ unsigned int backlog = cl->leaf.q->qstats.backlog;
- qdisc_reset(cl->un.leaf.q);
- qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+ qdisc_reset(cl->leaf.q);
+ qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
}
/* delete from hash and active; remainder in destroy_class */
@@ -1419,13 +1403,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
classid, NULL);
sch_tree_lock(sch);
if (parent && !parent->level) {
- unsigned int qlen = parent->un.leaf.q->q.qlen;
- unsigned int backlog = parent->un.leaf.q->qstats.backlog;
+ unsigned int qlen = parent->leaf.q->q.qlen;
+ unsigned int backlog = parent->leaf.q->qstats.backlog;
/* turn parent into inner node */
- qdisc_reset(parent->un.leaf.q);
- qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
- qdisc_destroy(parent->un.leaf.q);
+ qdisc_reset(parent->leaf.q);
+ qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+ qdisc_destroy(parent->leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
@@ -1436,10 +1420,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
}
parent->level = (parent->parent ? parent->parent->level
: TC_HTB_MAXDEPTH) - 1;
- memset(&parent->un.inner, 0, sizeof(parent->un.inner));
+ memset(&parent->inner, 0, sizeof(parent->inner));
}
/* leaf (we) needs elementary qdisc */
- cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
+ cl->leaf.q = new_q ? new_q : &noop_qdisc;
cl->common.classid = classid;
cl->parent = parent;
@@ -1455,8 +1439,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_class_hash_insert(&q->clhash, &cl->common);
if (parent)
parent->children++;
- if (cl->un.leaf.q != &noop_qdisc)
- qdisc_hash_add(cl->un.leaf.q, true);
+ if (cl->leaf.q != &noop_qdisc)
+ qdisc_hash_add(cl->leaf.q, true);
} else {
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
@@ -1478,7 +1462,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
/* it used to be a nasty bug here, we have to check that node
- * is really leaf before changing cl->un.leaf !
+ * is really leaf before changing cl->leaf !
*/
if (!cl->level) {
u64 quantum = cl->rate.rate_bytes_ps;
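
All the `un.leaf`/`un.inner` churn in this file is a single mechanical change: the union inside struct htb_class loses its name, so members are reached directly with no layout or behavior change. A standalone illustration of the anonymous-union spelling:

#include <stdio.h>

struct class_model {
	int level;
	union {
		struct { int q; } leaf;
		struct { int feed; } inner;
	};				/* no "un" name anymore */
};

int main(void)
{
	struct class_model cl = { .level = 0, .leaf.q = 7 };

	printf("%d\n", cl.leaf.q);	/* was cl.un.leaf.q */
	return 0;
}
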
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ad18a2052416..506e1960ed7f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -412,16 +412,6 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
return segs;
}
-static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
-{
- skb->next = qh->head;
-
- if (!qh->head)
- qh->tail = skb;
- qh->head = skb;
- qh->qlen++;
-}
-
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
@@ -570,7 +560,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cb->time_to_send = ktime_get_ns();
q->counter = 0;
- netem_enqueue_skb_head(&sch->q, skb);
+ __qdisc_enqueue_head(skb, &sch->q);
sch->qstats.requeues++;
}
@@ -578,7 +568,7 @@ finish_segs:
if (segs) {
while (segs) {
skb2 = segs->next;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
last_len = segs->len;
rc = qdisc_enqueue(segs, sch, to_free);
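
As with htb's removed htb_enqueue_tail(), netem's private head-insert helper gives way to the shared __qdisc_enqueue_head(); the list surgery is assumed to be the same three-pointer update on the (head, tail, qlen) queue. A standalone model:

struct node { struct node *next; };
struct q { struct node *head, *tail; int qlen; };

static void enqueue_head(struct q *q, struct node *n)
{
	n->next = q->head;
	if (!q->head)
		q->tail = n;	/* empty queue: n is also the tail */
	q->head = n;
	q->qlen++;
}
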
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 6f74a426f159..a4530e85bd02 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -162,7 +162,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
nb = 0;
while (segs) {
nskb = segs->next;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 0b427100b0d4..331cc734e3db 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -459,7 +459,7 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
* element in the queue, then count it towards
* possible PD.
*/
- if (pos == ulpq->reasm.next) {
+ if (skb_queue_is_first(&ulpq->reasm, pos)) {
pd_first = pos;
pd_last = pos;
pd_len = pos->len;
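
Comparing pos against ulpq->reasm.next open-coded "is the first element"; the helper states the intent and keeps working if sk_buff list internals change. Its presumed definition, given that an sk_buff_head anchors the circular list:

/* Presumed shape of the helper in include/linux/skbuff.h. */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *)list;
}
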
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 418f03d0be90..91891041e5e1 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -577,7 +577,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
rcu_dereference_rtnl(orig_dev->tipc_ptr);
if (likely(b && test_bit(0, &b->up) &&
(skb->pkt_type <= PACKET_MULTICAST))) {
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
tipc_rcv(dev_net(b->pt.dev), skb, b);
rcu_read_unlock();
return NET_RX_SUCCESS;
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 2627b5d812e9..d8956f7daac4 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -57,16 +57,12 @@
* @idr_lock: protect the connection identifier set
* @idr_in_use: amount of allocated identifier entry
* @net: network namespace instance
- * @rcvbuf_cache: memory cache of server receive buffer
+ * @awork: accept work item
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
* @max_rcvbuf_size: maximum permitted receive message length
- * @tipc_conn_new: callback will be called when new connection is incoming
- * @tipc_conn_release: callback will be called before releasing the connection
- * @tipc_conn_recvmsg: callback will be called when message arrives
+ * @listener: topsrv listener socket
* @name: server name
- * @imp: message importance
- * @type: socket type
*/
struct tipc_topsrv {
struct idr conn_idr;
@@ -90,9 +86,7 @@ struct tipc_topsrv {
* @server: pointer to connected server
* @sub_list: list of all pertaining subscriptions
* @sub_lock: lock protecting the subscription list
- * @outqueue_lock: control access to the outqueue
* @rwork: receive work item
- * @rx_action: what to do when connection socket is active
* @outqueue: pointer to first outbound message in queue
* @outqueue_lock: control access to the outqueue
* @swork: send work item
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index b9c6ecfbcfea..5ff51bac8b46 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -43,12 +43,133 @@
#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
+static int __skb_nsg(struct sk_buff *skb, int offset, int len,
+ unsigned int recursion_level)
+{
+ int start = skb_headlen(skb);
+ int i, chunk = start - offset;
+ struct sk_buff *frag_iter;
+ int elt = 0;
+
+ if (unlikely(recursion_level >= 24))
+ return -EMSGSIZE;
+
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ elt++;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ chunk = end - offset;
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ elt++;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+ start = end;
+ }
+
+ if (unlikely(skb_has_frag_list(skb))) {
+ skb_walk_frags(skb, frag_iter) {
+ int end, ret;
+
+ WARN_ON(start > offset + len);
+
+ end = start + frag_iter->len;
+ chunk = end - offset;
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ ret = __skb_nsg(frag_iter, offset - start, chunk,
+ recursion_level + 1);
+ if (unlikely(ret < 0))
+ return ret;
+ elt += ret;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+ start = end;
+ }
+ }
+ BUG_ON(len);
+ return elt;
+}
+
+/* Return the number of scatterlist elements required to completely map the
+ * skb, or -EMSGSIZE if the recursion depth is exceeded.
+ */
+static int skb_nsg(struct sk_buff *skb, int offset, int len)
+{
+ return __skb_nsg(skb, offset, len, 0);
+}
+
+static void tls_decrypt_done(struct crypto_async_request *req, int err)
+{
+ struct aead_request *aead_req = (struct aead_request *)req;
+ struct scatterlist *sgout = aead_req->dst;
+ struct tls_sw_context_rx *ctx;
+ struct tls_context *tls_ctx;
+ struct scatterlist *sg;
+ struct sk_buff *skb;
+ unsigned int pages;
+ int pending;
+
+ skb = (struct sk_buff *)req->data;
+ tls_ctx = tls_get_ctx(skb->sk);
+ ctx = tls_sw_ctx_rx(tls_ctx);
+ pending = atomic_dec_return(&ctx->decrypt_pending);
+
+ /* Propagate if there was an err */
+ if (err) {
+ ctx->async_wait.err = err;
+ tls_err_abort(skb->sk, err);
+ }
+
+ /* After using skb->sk to propagate sk through crypto async callback
+ * we need to NULL it again.
+ */
+ skb->sk = NULL;
+
+ /* Release the skb, pages and memory allocated for crypto req */
+ kfree_skb(skb);
+
+ /* Skip the first S/G entry as it points to AAD */
+ for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ if (!sg)
+ break;
+ put_page(sg_page(sg));
+ }
+
+ kfree(aead_req);
+
+ if (!pending && READ_ONCE(ctx->async_notify))
+ complete(&ctx->async_wait.completion);
+}
+
static int tls_do_decryption(struct sock *sk,
+ struct sk_buff *skb,
struct scatterlist *sgin,
struct scatterlist *sgout,
char *iv_recv,
size_t data_len,
- struct aead_request *aead_req)
+ struct aead_request *aead_req,
+ bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
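
With skb_nsg() the zerocopy receive path can size its input scatterlist without the skb_cow_data() copy the non-zerocopy branch still performs. The typical count-then-allocate use, as a trimmed kernel-context sketch (the real caller below allocates the aead request and both sg arrays in one kmalloc):

n_sgin = skb_nsg(skb, offset, len);	/* count sg slots needed */
if (n_sgin < 1)
	return -EBADMSG;	/* covers the -EMSGSIZE depth limit too */
sgin = kmalloc_array(n_sgin + 1, sizeof(*sgin), GFP_ATOMIC);
if (!sgin)
	return -ENOMEM;
sg_init_table(sgin, n_sgin + 1);	/* slot 0 is reserved for the AAD */
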
@@ -59,10 +180,36 @@ static int tls_do_decryption(struct sock *sk,
aead_request_set_crypt(aead_req, sgin, sgout,
data_len + tls_ctx->rx.tag_size,
(u8 *)iv_recv);
- aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &ctx->async_wait);
- ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
+ if (async) {
+ /* Using skb->sk to push sk through to crypto async callback
+ * handler. This allows propagating errors up to the socket
+ * if needed. It _must_ be cleared in the async handler
+ * before kfree_skb is called. We _know_ skb->sk is NULL
+ * because it is a clone from strparser.
+ */
+ skb->sk = sk;
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tls_decrypt_done, skb);
+ atomic_inc(&ctx->decrypt_pending);
+ } else {
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->async_wait);
+ }
+
+ ret = crypto_aead_decrypt(aead_req);
+ if (ret == -EINPROGRESS) {
+ if (async)
+ return ret;
+
+ ret = crypto_wait_req(ret, &ctx->async_wait);
+ }
+
+ if (async)
+ atomic_dec(&ctx->decrypt_pending);
+
return ret;
}
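
Note the accounting bracket above: the async path bumps decrypt_pending before submission, and only -EINPROGRESS hands that reference to tls_decrypt_done(); if an "async" request completes synchronously the callback never fires, so the submitter drops the count itself on the way out. The ownership rule, as a kernel-context sketch:

atomic_inc(&ctx->decrypt_pending);	/* callback may run any time now */
ret = crypto_aead_decrypt(aead_req);
if (ret == -EINPROGRESS)
	return ret;			/* tls_decrypt_done() owns the dec */
atomic_dec(&ctx->decrypt_pending);	/* completed synchronously */
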
@@ -354,7 +501,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- int ret = 0;
+ int ret;
int required_size;
long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
bool eor = !(msg->msg_flags & MSG_MORE);
@@ -370,7 +517,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
lock_sock(sk);
- if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
+ ret = tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo);
+ if (ret)
goto send_end;
if (unlikely(msg->msg_controllen)) {
@@ -505,7 +653,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- int ret = 0;
+ int ret;
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
bool eor;
size_t orig_size = size;
@@ -525,7 +673,8 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
- if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
+ ret = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
+ if (ret)
goto sendpage_end;
/* Call the sk_stream functions to manage the sndbuf mem. */
@@ -684,12 +833,14 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
else
n_sgout = sg_nents(out_sg);
+ n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
+ rxm->full_len - tls_ctx->rx.prepend_size);
} else {
n_sgout = 0;
*zc = false;
+ n_sgin = skb_cow_data(skb, 0, &unused);
}
- n_sgin = skb_cow_data(skb, 0, &unused);
if (n_sgin < 1)
return -EBADMSG;
@@ -769,7 +920,10 @@ fallback_to_reg_recv:
}
/* Prepare and submit AEAD request */
- err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);
+ err = tls_do_decryption(sk, skb, sgin, sgout, iv,
+ data_len, aead_req, *zc);
+ if (err == -EINPROGRESS)
+ return err;
/* Release the pages in case iov was mapped to pages */
for (; pages > 0; pages--)
@@ -794,8 +948,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
#endif
if (!ctx->decrypted) {
err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
- if (err < 0)
+ if (err < 0) {
+ if (err == -EINPROGRESS)
+ tls_advance_record_sn(sk, &tls_ctx->rx);
+
return err;
+ }
} else {
*zc = false;
}
@@ -823,18 +981,20 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- struct strp_msg *rxm = strp_msg(skb);
- if (len < rxm->full_len) {
- rxm->offset += len;
- rxm->full_len -= len;
+ if (skb) {
+ struct strp_msg *rxm = strp_msg(skb);
- return false;
+ if (len < rxm->full_len) {
+ rxm->offset += len;
+ rxm->full_len -= len;
+ return false;
+ }
+ kfree_skb(skb);
}
/* Finished with message */
ctx->recv_pkt = NULL;
- kfree_skb(skb);
__strp_unpause(&ctx->strp);
return true;
@@ -857,6 +1017,7 @@ int tls_sw_recvmsg(struct sock *sk,
int target, err = 0;
long timeo;
bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+ int num_async = 0;
flags |= nonblock;
@@ -869,6 +1030,7 @@ int tls_sw_recvmsg(struct sock *sk,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
bool zc = false;
+ bool async = false;
int chunk = 0;
skb = tls_wait_data(sk, flags, timeo, &err);
@@ -876,6 +1038,7 @@ int tls_sw_recvmsg(struct sock *sk,
goto recv_end;
rxm = strp_msg(skb);
+
if (!cmsg) {
int cerr;
@@ -902,26 +1065,39 @@ int tls_sw_recvmsg(struct sock *sk,
err = decrypt_skb_update(sk, skb, &msg->msg_iter,
&chunk, &zc);
- if (err < 0) {
+ if (err < 0 && err != -EINPROGRESS) {
tls_err_abort(sk, EBADMSG);
goto recv_end;
}
+
+ if (err == -EINPROGRESS) {
+ async = true;
+ num_async++;
+ goto pick_next_record;
+ }
+
ctx->decrypted = true;
}
if (!zc) {
chunk = min_t(unsigned int, rxm->full_len, len);
+
err = skb_copy_datagram_msg(skb, rxm->offset, msg,
chunk);
if (err < 0)
goto recv_end;
}
+pick_next_record:
copied += chunk;
len -= chunk;
if (likely(!(flags & MSG_PEEK))) {
u8 control = ctx->control;
+ /* For async, drop current skb reference */
+ if (async)
+ skb = NULL;
+
if (tls_sw_advance_skb(sk, skb, chunk)) {
/* Return full control message to
* userspace before trying to parse
@@ -930,6 +1106,8 @@ int tls_sw_recvmsg(struct sock *sk,
msg->msg_flags |= MSG_EOR;
if (control != TLS_RECORD_TYPE_DATA)
goto recv_end;
+ } else {
+ break;
}
} else {
/* MSG_PEEK right now cannot look beyond current skb
@@ -946,6 +1124,22 @@ int tls_sw_recvmsg(struct sock *sk,
} while (len);
recv_end:
+ if (num_async) {
+ /* Wait for all previously submitted records to be decrypted */
+ smp_store_mb(ctx->async_notify, true);
+ if (atomic_read(&ctx->decrypt_pending)) {
+ err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ if (err) {
+ /* one of the async decrypt requests failed */
+ tls_err_abort(sk, err);
+ copied = 0;
+ }
+ } else {
+ reinit_completion(&ctx->async_wait.completion);
+ }
+ WRITE_ONCE(ctx->async_notify, false);
+ }
+
release_sock(sk);
return copied ? : err;
}
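
The receive path above pipelines record decryption: each zero-copy record bumps decrypt_pending before its AEAD request is submitted, tls_decrypt_done() drops the count on completion, and recvmsg() announces itself through async_notify before re-reading the counter so the final completion knows to signal the waiter. A minimal, self-contained sketch of that synchronization pattern follows; all demo_* names are illustrative, not the tls_sw.c symbols.

#include <linux/atomic.h>
#include <linux/completion.h>

struct demo_ctx {
	atomic_t pending;		/* in-flight async requests */
	bool notify;			/* a waiter has announced itself */
	struct completion done;
};

static void demo_complete_one(struct demo_ctx *c)
{
	/* the last completion wakes the waiter, if one is registered */
	if (atomic_dec_return(&c->pending) == 0 && READ_ONCE(c->notify))
		complete(&c->done);
}

static void demo_wait_all(struct demo_ctx *c)
{
	smp_store_mb(c->notify, true);	/* publish flag before reading count */
	if (atomic_read(&c->pending))
		wait_for_completion(&c->done);
	WRITE_ONCE(c->notify, false);
}
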
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4b8ec659e797..d5f9b5235cdd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4095,6 +4095,9 @@ static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
cap = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len);
if (cap && cap[1] >= sizeof(*params->vht_cap))
params->vht_cap = (void *)(cap + 2);
+ cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len);
+ if (cap && cap[1] >= sizeof(*params->he_cap) + 1)
+ params->he_cap = (void *)(cap + 3);
}
static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
@@ -4724,10 +4727,11 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc);
PUT_SINFO_U64(BEACON_RX, rx_beacon);
PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
- PUT_SINFO(ACK_SIGNAL, ack_signal, u8);
if (wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT))
- PUT_SINFO(DATA_ACK_SIGNAL_AVG, avg_ack_signal, s8);
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) {
+ PUT_SINFO(ACK_SIGNAL, ack_signal, u8);
+ PUT_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8);
+ }
#undef PUT_SINFO
#undef PUT_SINFO_U64
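
Unlike the VHT lookup a few lines earlier, the HE capability is carried in an extension element: an Element ID Extension octet sits between the length and the payload, which is why the payload starts at cap + 3 and the length check needs one extra byte. A sketch of the two layouts, using a hypothetical helper rather than the cfg80211 API:

#include <linux/types.h>

/* regular element:   [eid][len][payload ...]
 * extension element: [eid = 255][len][eid_ext][payload ...]
 * len covers eid_ext plus the payload, so a payload of need bytes
 * requires len >= need + 1.
 */
static const u8 *demo_ext_elem_payload(const u8 *elem, size_t need)
{
	if (!elem || elem[1] < need + 1)
		return NULL;
	return elem + 3;		/* skip eid, len, eid_ext */
}
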
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2f702adf2912..56be68a27bb9 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -847,22 +847,36 @@ static bool valid_regdb(const u8 *data, unsigned int size)
return true;
}
-static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
- struct fwdb_wmm_rule *wmm)
-{
- struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
- unsigned int i;
+static void set_wmm_rule(const struct fwdb_header *db,
+ const struct fwdb_country *country,
+ const struct fwdb_rule *rule,
+ struct ieee80211_reg_rule *rrule)
+{
+ struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule;
+ struct fwdb_wmm_rule *wmm;
+ unsigned int i, wmm_ptr;
+
+ wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
+ wmm = (void *)((u8 *)db + wmm_ptr);
+
+ if (!valid_wmm(wmm)) {
+ pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n",
+ be32_to_cpu(rule->start), be32_to_cpu(rule->end),
+ country->alpha2[0], country->alpha2[1]);
+ return;
+ }
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- rule->client[i].cw_min =
+ wmm_rule->client[i].cw_min =
ecw2cw((wmm->client[i].ecw & 0xf0) >> 4);
- rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
- rule->client[i].aifsn = wmm->client[i].aifsn;
- rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot);
- rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
- rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
- rule->ap[i].aifsn = wmm->ap[i].aifsn;
- rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
+ wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
+ wmm_rule->client[i].aifsn = wmm->client[i].aifsn;
+ wmm_rule->client[i].cot =
+ 1000 * be16_to_cpu(wmm->client[i].cot);
+ wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
+ wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
+ wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn;
+ wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
}
rrule->has_wmm = true;
@@ -870,7 +884,7 @@ static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
static int __regdb_query_wmm(const struct fwdb_header *db,
const struct fwdb_country *country, int freq,
- struct ieee80211_reg_rule *rule)
+ struct ieee80211_reg_rule *rrule)
{
unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -879,18 +893,14 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
for (i = 0; i < coll->n_rules; i++) {
__be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2));
unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2;
- struct fwdb_rule *rrule = (void *)((u8 *)db + rule_ptr);
- struct fwdb_wmm_rule *wmm;
- unsigned int wmm_ptr;
+ struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr);
- if (rrule->len < offsetofend(struct fwdb_rule, wmm_ptr))
+ if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr))
continue;
- if (freq >= KHZ_TO_MHZ(be32_to_cpu(rrule->start)) &&
- freq <= KHZ_TO_MHZ(be32_to_cpu(rrule->end))) {
- wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
- wmm = (void *)((u8 *)db + wmm_ptr);
- set_wmm_rule(rule, wmm);
+ if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) &&
+ freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) {
+ set_wmm_rule(db, country, rule, rrule);
return 0;
}
}
@@ -972,12 +982,8 @@ static int regdb_query_country(const struct fwdb_header *db,
if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout))
rrule->dfs_cac_ms =
1000 * be16_to_cpu(rule->cac_timeout);
- if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
- u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
- struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
-
- set_wmm_rule(rrule, wmm);
- }
+ if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr))
+ set_wmm_rule(db, country, rule, rrule);
}
return reg_schedule_apply(regdom);
@@ -3184,13 +3190,59 @@ static void restore_regulatory_settings(bool reset_user)
schedule_work(&reg_work);
}
+static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ wdev_lock(wdev);
+ if (!(wdev->wiphy->regulatory_flags & flag)) {
+ wdev_unlock(wdev);
+ return false;
+ }
+ wdev_unlock(wdev);
+ }
+ }
+
+ return true;
+}
+
void regulatory_hint_disconnect(void)
{
+ /* Restoring regulatory settings is not required when all wiphys
+ * ignore the country IE from the connected access point, but any
+ * beacon hints must still be cleared when beacon hints are
+ * supported.
+ */
+ if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) {
+ struct reg_beacon *reg_beacon, *btmp;
+
+ if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS))
+ return;
+
+ spin_lock_bh(&reg_pending_beacons_lock);
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_pending_beacons, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+ spin_unlock_bh(&reg_pending_beacons_lock);
+
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_beacon_list, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+
+ return;
+ }
+
pr_debug("All devices are disconnected, going to restore regulatory settings\n");
restore_regulatory_settings(false);
}
-static bool freq_is_chan_12_13_14(u16 freq)
+static bool freq_is_chan_12_13_14(u32 freq)
{
if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) ||
freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) ||
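
For reference, set_wmm_rule() above unpacks contention windows that the firmware database stores as exponents: each ecw byte carries ECWmin in the high nibble and ECWmax in the low nibble, and ecw2cw() (defined earlier in reg.c, outside this hunk) presumably expands CW = 2^ECW - 1. A small sketch of that conversion with hypothetical demo_* names:

#include <linux/types.h>

static u32 demo_ecw2cw(int ecw)
{
	return (1 << ecw) - 1;
}

static void demo_unpack_ecw(u8 ecw, u32 *cw_min, u32 *cw_max)
{
	*cw_min = demo_ecw2cw((ecw & 0xf0) >> 4);	/* 0x4a -> 15 */
	*cw_max = demo_ecw2cw(ecw & 0x0f);		/* 0x4a -> 1023 */
}
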
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 7c73510b161f..5e7eec849200 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -112,7 +112,7 @@
} while (0)
#define CHAN_ENTRY __field(enum nl80211_band, band) \
- __field(u16, center_freq)
+ __field(u32, center_freq)
#define CHAN_ASSIGN(chan) \
do { \
if (chan) { \
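
The widening from u16 to u32 matters once 60 GHz channels 5 and 6 are reachable: channel 6 centers at 69120 MHz, which exceeds U16_MAX (65535) and would have been recorded as 69120 % 65536 = 3584 in the old tracepoint field. A short illustration with hypothetical values:

static void demo_truncation(void)
{
	u32 freq = 69120;	/* 60 GHz channel 6 center, in MHz */
	u16 old = freq;		/* silently truncates to 3584 */

	(void)old;
}
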
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 959ed3acd240..ef14d80ca03e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,17 +5,20 @@
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*/
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
+#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/ip.h>
#include <net/dsfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/gcd.h>
+#include <linux/bitfield.h>
#include "core.h"
#include "rdev-ops.h"
@@ -88,7 +91,7 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band)
return 5000 + chan * 5;
break;
case NL80211_BAND_60GHZ:
- if (chan < 5)
+ if (chan < 7)
return 56160 + chan * 2160;
break;
default:
@@ -109,7 +112,7 @@ int ieee80211_frequency_to_channel(int freq)
return (freq - 4000) / 5;
else if (freq <= 45000) /* DMG band lower limit */
return (freq - 5000) / 5;
- else if (freq >= 58320 && freq <= 64800)
+ else if (freq >= 58320 && freq <= 70200)
return (freq - 56160) / 2160;
else
return 0;
@@ -1568,7 +1571,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
}
/* 56.16 GHz, channel 1..4 */
- if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 4) {
+ if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 6) {
if (chandef->width >= NL80211_CHAN_WIDTH_40)
return false;
@@ -1893,3 +1896,154 @@ EXPORT_SYMBOL(rfc1042_header);
const unsigned char bridge_tunnel_header[] __aligned(2) =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
EXPORT_SYMBOL(bridge_tunnel_header);
+
+/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
+struct iapp_layer2_update {
+ u8 da[ETH_ALEN]; /* broadcast */
+ u8 sa[ETH_ALEN]; /* STA addr */
+ __be16 len; /* 6 */
+ u8 dsap; /* 0 */
+ u8 ssap; /* 0 */
+ u8 control;
+ u8 xid_info[3];
+} __packed;
+
+void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr)
+{
+ struct iapp_layer2_update *msg;
+ struct sk_buff *skb;
+
+ /* Send a Layer 2 Update frame to update forwarding tables in
+ * layer 2 bridge devices. */
+
+ skb = dev_alloc_skb(sizeof(*msg));
+ if (!skb)
+ return;
+ msg = skb_put(skb, sizeof(*msg));
+
+ /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
+ * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
+
+ eth_broadcast_addr(msg->da);
+ ether_addr_copy(msg->sa, addr);
+ msg->len = htons(6);
+ msg->dsap = 0;
+ msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
+ msg->control = 0xaf; /* XID response lsb.1111F101.
+ * F=0 (no poll command; unsolicited frame) */
+ msg->xid_info[0] = 0x81; /* XID format identifier */
+ msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
+ msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ netif_rx_ni(skb);
+}
+EXPORT_SYMBOL(cfg80211_send_layer2_update);
+
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable)
+{
+ u16 map = le16_to_cpu(cap->supp_mcs.rx_mcs_map);
+ int max_vht_nss = 0;
+ int ext_nss_bw;
+ int supp_width;
+ int i, mcs_encoding;
+
+ if (map == 0xffff)
+ return 0;
+
+ if (WARN_ON(mcs > 9))
+ return 0;
+ if (mcs <= 7)
+ mcs_encoding = 0;
+ else if (mcs == 8)
+ mcs_encoding = 1;
+ else
+ mcs_encoding = 2;
+
+ /* find max_vht_nss for the given MCS */
+ for (i = 7; i >= 0; i--) {
+ int supp = (map >> (2 * i)) & 3;
+
+ if (supp == 3)
+ continue;
+
+ if (supp >= mcs_encoding) {
+ max_vht_nss = i;
+ break;
+ }
+ }
+
+ if (!(cap->supp_mcs.tx_mcs_map &
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
+ return max_vht_nss;
+
+ ext_nss_bw = le32_get_bits(cap->vht_cap_info,
+ IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
+ supp_width = le32_get_bits(cap->vht_cap_info,
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK);
+
+ /* if not capable, treat ext_nss_bw as 0 */
+ if (!ext_nss_bw_capable)
+ ext_nss_bw = 0;
+
+ /* This is invalid */
+ if (supp_width == 3)
+ return 0;
+
+ /* This is an invalid combination so pretend nothing is supported */
+ if (supp_width == 2 && (ext_nss_bw == 1 || ext_nss_bw == 2))
+ return 0;
+
+ /*
+ * Cover all the special cases according to IEEE 802.11-2016
+ * Table 9-250. All other cases either scale by a factor of 1
+ * or are not valid/supported.
+ */
+ switch (bw) {
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ if ((supp_width == 1 || supp_width == 2) &&
+ ext_nss_bw == 3)
+ return 2 * max_vht_nss;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ if (supp_width == 0 &&
+ (ext_nss_bw == 1 || ext_nss_bw == 2))
+ return DIV_ROUND_UP(max_vht_nss, 2);
+ if (supp_width == 0 &&
+ ext_nss_bw == 3)
+ return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ if (supp_width == 1 &&
+ ext_nss_bw == 3)
+ return 2 * max_vht_nss;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ if (supp_width == 0 && ext_nss_bw == 1)
+ return 0; /* not possible */
+ if (supp_width == 0 &&
+ ext_nss_bw == 2)
+ return DIV_ROUND_UP(max_vht_nss, 2);
+ if (supp_width == 0 &&
+ ext_nss_bw == 3)
+ return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ if (supp_width == 1 &&
+ ext_nss_bw == 0)
+ return 0; /* not possible */
+ if (supp_width == 1 &&
+ ext_nss_bw == 1)
+ return DIV_ROUND_UP(max_vht_nss, 2);
+ if (supp_width == 1 &&
+ ext_nss_bw == 2)
+ return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ break;
+ }
+
+ /* not covered or invalid combination received */
+ return max_vht_nss;
+}
+EXPORT_SYMBOL(ieee80211_get_vht_max_nss);
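
The new 60 GHz bounds in this file all fall out of the DMG channelization, center = 56160 + chan * 2160 MHz: channel 4 centers at 64800 (the old upper limit), channel 6 at 69120, and the reverse mapping accepts up to 70200 so any frequency on the channel 6 grid still rounds down to channel 6. A worked sketch:

static int demo_dmg_center(int chan)		/* chan 1..6 */
{
	return 56160 + chan * 2160;		/* 1 -> 58320, 6 -> 69120 */
}

static int demo_dmg_chan(int freq)		/* 58320..70200 MHz */
{
	return (freq - 56160) / 2160;		/* 70200 -> 6 */
}
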
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index bfe2dbea480b..b3b632c5aeae 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -76,8 +76,6 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
- bpf.command = XDP_QUERY_XSK_UMEM;
-
rtnl_lock();
err = xdp_umem_query(dev, queue_id);
if (err) {
@@ -314,8 +312,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->pid = get_task_pid(current, PIDTYPE_PID);
umem->address = (unsigned long)addr;
- umem->props.chunk_mask = ~((u64)chunk_size - 1);
- umem->props.size = size;
+ umem->chunk_mask = ~((u64)chunk_size - 1);
+ umem->size = size;
umem->headroom = headroom;
umem->chunk_size_nohr = chunk_size - headroom;
umem->npgs = size / PAGE_SIZE;
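
The chunk mask stored above relies on chunk_size being a power of two (validated earlier in xdp_umem_reg()): ~(chunk_size - 1) zeroes exactly the offset bits, so an address can be rounded down to its chunk base with a single AND. A short sketch with 2 KiB chunks:

#include <linux/types.h>

static u64 demo_chunk_base(u64 addr, u64 chunk_size)
{
	u64 mask = ~(chunk_size - 1);	/* 2048 -> 0xfffffffffffff800 */

	return addr & mask;		/* 0x1234 & mask == 0x1000 */
}
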
diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
index f11560334f88..c8be1ad3eb88 100644
--- a/net/xdp/xdp_umem.h
+++ b/net/xdp/xdp_umem.h
@@ -8,16 +8,6 @@
#include <net/xdp_sock.h>
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
- return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
- return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
-}
-
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
u32 queue_id, u16 flags);
bool xdp_umem_validate_queues(struct xdp_umem *umem);
diff --git a/net/xdp/xdp_umem_props.h b/net/xdp/xdp_umem_props.h
deleted file mode 100644
index 40eab10dfc49..000000000000
--- a/net/xdp/xdp_umem_props.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* XDP user-space packet buffer
- * Copyright(c) 2018 Intel Corporation.
- */
-
-#ifndef XDP_UMEM_PROPS_H_
-#define XDP_UMEM_PROPS_H_
-
-struct xdp_umem_props {
- u64 chunk_mask;
- u64 size;
-};
-
-#endif /* XDP_UMEM_PROPS_H_ */
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e937cd7c17d..5a432dfee4ee 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -55,20 +55,30 @@ EXPORT_SYMBOL(xsk_umem_discard_addr);
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
- void *buffer;
+ void *to_buf, *from_buf;
+ u32 metalen;
u64 addr;
int err;
if (!xskq_peek_addr(xs->umem->fq, &addr) ||
- len > xs->umem->chunk_size_nohr) {
+ len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
xs->rx_dropped++;
return -ENOSPC;
}
addr += xs->umem->headroom;
- buffer = xdp_umem_get_data(xs->umem, addr);
- memcpy(buffer, xdp->data, len);
+ if (unlikely(xdp_data_meta_unsupported(xdp))) {
+ from_buf = xdp->data;
+ metalen = 0;
+ } else {
+ from_buf = xdp->data_meta;
+ metalen = xdp->data - xdp->data_meta;
+ }
+
+ to_buf = xdp_umem_get_data(xs->umem, addr);
+ memcpy(to_buf, from_buf, len + metalen);
+ addr += metalen;
err = xskq_produce_batch_desc(xs->rx, addr, len);
if (!err) {
xskq_discard_addr(xs->umem->fq);
@@ -111,6 +121,7 @@ void xsk_flush(struct xdp_sock *xs)
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
+ u32 metalen = xdp->data - xdp->data_meta;
u32 len = xdp->data_end - xdp->data;
void *buffer;
u64 addr;
@@ -120,7 +131,7 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return -EINVAL;
if (!xskq_peek_addr(xs->umem->fq, &addr) ||
- len > xs->umem->chunk_size_nohr) {
+ len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
xs->rx_dropped++;
return -ENOSPC;
}
@@ -128,7 +139,8 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
addr += xs->umem->headroom;
buffer = xdp_umem_get_data(xs->umem, addr);
- memcpy(buffer, xdp->data, len);
+ memcpy(buffer, xdp->data_meta, len + metalen);
+ addr += metalen;
err = xskq_produce_batch_desc(xs->rx, addr, len);
if (!err) {
xskq_discard_addr(xs->umem->fq);
@@ -458,8 +470,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
goto out_unlock;
} else {
/* This xsk has its own umem. */
- xskq_set_umem(xs->umem->fq, &xs->umem->props);
- xskq_set_umem(xs->umem->cq, &xs->umem->props);
+ xskq_set_umem(xs->umem->fq, xs->umem->size,
+ xs->umem->chunk_mask);
+ xskq_set_umem(xs->umem->cq, xs->umem->size,
+ xs->umem->chunk_mask);
err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
if (err)
@@ -469,8 +483,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->dev = dev;
xs->zc = xs->umem->zc;
xs->queue_id = qid;
- xskq_set_umem(xs->rx, &xs->umem->props);
- xskq_set_umem(xs->tx, &xs->umem->props);
+ xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
+ xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
xdp_add_sk_umem(xs->umem, xs);
out_unlock:
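
Both receive paths in this file now copy starting at xdp->data_meta instead of xdp->data and then advance addr by metalen, so any XDP metadata lands immediately in front of the packet bytes while the descriptor posted to user space still points at the packet itself. A layout sketch with a hypothetical helper:

#include <linux/types.h>

/* chunk base + headroom:
 *   [ metadata (metalen) ][ packet data (len) ]
 *                         ^ descriptor addr
 * a consumer can recover the metadata at desc.addr - metalen.
 */
static u64 demo_rx_desc_addr(u64 chunk, u32 headroom, u32 metalen)
{
	return chunk + headroom + metalen;
}
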
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index 6c32e92e98fc..2dc1384d9f27 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -7,12 +7,13 @@
#include "xsk_queue.h"
-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props)
+void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
if (!q)
return;
- q->umem_props = *umem_props;
+ q->size = size;
+ q->chunk_mask = chunk_mask;
}
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 8a64b150be54..82252cccb4e0 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -31,7 +31,8 @@ struct xdp_umem_ring {
};
struct xsk_queue {
- struct xdp_umem_props umem_props;
+ u64 chunk_mask;
+ u64 size;
u32 ring_mask;
u32 nentries;
u32 prod_head;
@@ -78,7 +79,7 @@ static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
- if (addr >= q->umem_props.size) {
+ if (addr >= q->size) {
q->invalid_descs++;
return false;
}
@@ -92,7 +93,7 @@ static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
unsigned int idx = q->cons_tail & q->ring_mask;
- *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
+ *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
if (xskq_is_valid_addr(q, *addr))
return addr;
@@ -173,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
if (!xskq_is_valid_addr(q, d->addr))
return false;
- if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
- (d->addr & q->umem_props.chunk_mask)) {
+ if (((d->addr + d->len) & q->chunk_mask) !=
+ (d->addr & q->chunk_mask)) {
q->invalid_descs++;
return false;
}
@@ -253,7 +254,7 @@ static inline bool xskq_empty_desc(struct xsk_queue *q)
return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
}
-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
+void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);
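
xskq_is_valid_desc() above rejects buffers that straddle a chunk boundary by comparing the chunk base of the first byte with that of the end of the buffer. A worked example with 2 KiB chunks (chunk_mask = ~2047):

#include <linux/types.h>

static bool demo_crosses_chunk(u64 addr, u32 len, u64 chunk_mask)
{
	/* addr 0x7f0, len 0x40: 0x7f0 & mask == 0x000 but
	 * (0x7f0 + 0x40) & mask == 0x800, so the buffer crosses
	 * into the next chunk and the descriptor is invalid.
	 */
	return ((addr + len) & chunk_mask) != (addr & chunk_mask);
}
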
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 5611b7521020..260fbba4f03e 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -99,7 +99,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
do {
struct sk_buff *nskb = skb2->next;
- skb2->next = NULL;
+ skb_mark_not_on_list(skb2);
xo = xfrm_offload(skb2);
xo->flags |= XFRM_DEV_RESUME;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 45ba07ab3e4f..2d42cb0c94b8 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -189,7 +189,7 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
struct sk_buff *nskb = segs->next;
int err;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
err = xfrm_output2(net, sk, segs);
if (unlikely(err)) {