Diffstat (limited to 'net')
-rw-r--r--net/6lowpan/iphc.c11
-rw-r--r--net/bluetooth/hci_event.c13
-rw-r--r--net/bluetooth/hci_request.c6
-rw-r--r--net/bluetooth/l2cap_sock.c2
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/netfilter/ebtables.c4
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c20
-rw-r--r--net/can/raw.c2
-rw-r--r--net/ceph/ceph_common.c4
-rw-r--r--net/ceph/debugfs.c17
-rw-r--r--net/ceph/messenger.c35
-rw-r--r--net/ceph/mon_client.c457
-rw-r--r--net/ceph/osd_client.c109
-rw-r--r--net/ceph/pagelist.c4
-rw-r--r--net/ceph/pagevec.c30
-rw-r--r--net/core/datagram.c9
-rw-r--r--net/core/dev.c3
-rw-r--r--net/core/devlink.c22
-rw-r--r--net/core/filter.c11
-rw-r--r--net/core/rtnetlink.c1
-rw-r--r--net/core/sock.c106
-rw-r--r--net/core/sock_diag.c1
-rw-r--r--net/dccp/ipv4.c9
-rw-r--r--net/dccp/ipv6.c9
-rw-r--r--net/dsa/slave.c59
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/cipso_ipv4.c3
-rw-r--r--net/ipv4/fib_semantics.c34
-rw-r--r--net/ipv4/fou.c70
-rw-r--r--net/ipv4/gre_offload.c8
-rw-r--r--net/ipv4/inet_connection_sock.c3
-rw-r--r--net/ipv4/inet_diag.c10
-rw-r--r--net/ipv4/inet_hashtables.c80
-rw-r--r--net/ipv4/ip_gre.c13
-rw-r--r--net/ipv4/ip_sockglue.c13
-rw-r--r--net/ipv4/ip_tunnel_core.c20
-rw-r--r--net/ipv4/netfilter/arp_tables.c43
-rw-r--r--net/ipv4/netfilter/ip_tables.c48
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c54
-rw-r--r--net/ipv4/ping.c7
-rw-r--r--net/ipv4/raw.c13
-rw-r--r--net/ipv4/sysctl_net_ipv4.c11
-rw-r--r--net/ipv4/tcp.c22
-rw-r--r--net/ipv4/tcp_input.c54
-rw-r--r--net/ipv4/tcp_ipv4.c82
-rw-r--r--net/ipv4/tcp_minisocks.c5
-rw-r--r--net/ipv4/udp.c354
-rw-r--r--net/ipv4/udp_diag.c18
-rw-r--r--net/ipv4/udp_offload.c113
-rw-r--r--net/ipv4/udp_tunnel.c2
-rw-r--r--net/ipv6/Makefile5
-rw-r--r--net/ipv6/af_inet6.c9
-rw-r--r--net/ipv6/datagram.c9
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/ila/ila_xlat.c3
-rw-r--r--net/ipv6/inet6_hashtables.c62
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_offload.h3
-rw-r--r--net/ipv6/ip6_output.c23
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c9
-rw-r--r--net/ipv6/netfilter/ip6_tables.c48
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/raw.c7
-rw-r--r--net/ipv6/tcp_ipv6.c34
-rw-r--r--net/ipv6/udp.c249
-rw-r--r--net/ipv6/udp_offload.c11
-rw-r--r--net/l2tp/l2tp_ip.c8
-rw-r--r--net/l2tp/l2tp_ip6.c18
-rw-r--r--net/mac80211/agg-tx.c5
-rw-r--r--net/mac80211/cfg.c18
-rw-r--r--net/mac80211/chan.c4
-rw-r--r--net/mac80211/debugfs.c3
-rw-r--r--net/mac80211/debugfs_sta.c134
-rw-r--r--net/mac80211/driver-ops.h15
-rw-r--r--net/mac80211/ibss.c17
-rw-r--r--net/mac80211/ieee80211_i.h15
-rw-r--r--net/mac80211/iface.c2
-rw-r--r--net/mac80211/key.c1
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/mesh.c38
-rw-r--r--net/mac80211/mesh.h71
-rw-r--r--net/mac80211/mesh_hwmp.c6
-rw-r--r--net/mac80211/mesh_pathtbl.c965
-rw-r--r--net/mac80211/mesh_plink.c13
-rw-r--r--net/mac80211/mlme.c16
-rw-r--r--net/mac80211/ocb.c2
-rw-r--r--net/mac80211/rate.h4
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c77
-rw-r--r--net/mac80211/rx.c479
-rw-r--r--net/mac80211/scan.c8
-rw-r--r--net/mac80211/sta_info.c303
-rw-r--r--net/mac80211/sta_info.h131
-rw-r--r--net/mac80211/status.c4
-rw-r--r--net/mac80211/tdls.c43
-rw-r--r--net/mac80211/trace.h12
-rw-r--r--net/mac80211/tx.c210
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/mac80211/vht.c30
-rw-r--r--net/mac80211/wpa.c26
-rw-r--r--net/mpls/af_mpls.c3
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h2
-rw-r--r--net/netfilter/ipset/ip_set_core.c33
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h2
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c7
-rw-r--r--net/netfilter/nft_hash.c4
-rw-r--r--net/netfilter/xt_socket.c6
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/openvswitch/Kconfig4
-rw-r--r--net/openvswitch/conntrack.c24
-rw-r--r--net/packet/af_packet.c32
-rw-r--r--net/rds/ib_recv.c2
-rw-r--r--net/rds/page.c4
-rw-r--r--net/rfkill/core.c36
-rw-r--r--net/rxrpc/Kconfig2
-rw-r--r--net/rxrpc/Makefile7
-rw-r--r--net/rxrpc/af_rxrpc.c9
-rw-r--r--net/rxrpc/ar-accept.c4
-rw-r--r--net/rxrpc/ar-ack.c81
-rw-r--r--net/rxrpc/ar-call.c11
-rw-r--r--net/rxrpc/ar-connection.c14
-rw-r--r--net/rxrpc/ar-connevent.c27
-rw-r--r--net/rxrpc/ar-input.c18
-rw-r--r--net/rxrpc/ar-internal.h70
-rw-r--r--net/rxrpc/ar-output.c6
-rw-r--r--net/rxrpc/ar-proc.c2
-rw-r--r--net/rxrpc/ar-recvmsg.c18
-rw-r--r--net/rxrpc/ar-security.c166
-rw-r--r--net/rxrpc/insecure.c83
-rw-r--r--net/rxrpc/misc.c89
-rw-r--r--net/rxrpc/rxkad.c61
-rw-r--r--net/sctp/output.c9
-rw-r--r--net/sctp/proc.c3
-rw-r--r--net/socket.c12
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c4
-rw-r--r--net/sunrpc/cache.c4
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/socklib.c8
-rw-r--r--net/sunrpc/svcsock.c5
-rw-r--r--net/sunrpc/xdr.c50
-rw-r--r--net/sunrpc/xprtsock.c5
-rw-r--r--net/switchdev/switchdev.c2
-rw-r--r--net/tipc/bearer.c101
-rw-r--r--net/tipc/discover.c7
-rw-r--r--net/tipc/discover.h2
-rw-r--r--net/tipc/msg.h5
-rw-r--r--net/vmw_vsock/vmci_transport.c4
-rw-r--r--net/wireless/core.c7
-rw-r--r--net/wireless/nl80211.c158
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/wireless/sme.c16
-rw-r--r--net/wireless/trace.h6
-rw-r--r--net/wireless/wext-core.c5
-rw-r--r--net/xfrm/xfrm_input.c3
158 files changed, 3738 insertions, 2730 deletions
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 99bb22aea346..68c80f3c9add 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -148,6 +148,11 @@
(((a)->s6_addr16[6]) == 0) && \
(((a)->s6_addr[14]) == 0))
+#define lowpan_is_linklocal_zero_padded(a) \
+ (!(hdr->saddr.s6_addr[1] & 0x3f) && \
+ !hdr->saddr.s6_addr16[1] && \
+ !hdr->saddr.s6_addr32[1])
+
#define LOWPAN_IPHC_CID_DCI(cid) (cid & 0x0f)
#define LOWPAN_IPHC_CID_SCI(cid) ((cid & 0xf0) >> 4)
@@ -1101,7 +1106,8 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
true);
iphc1 |= LOWPAN_IPHC_SAC;
} else {
- if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL) {
+ if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL &&
+ lowpan_is_linklocal_zero_padded(hdr->saddr)) {
iphc1 |= lowpan_compress_addr_64(&hc_ptr,
&hdr->saddr,
saddr, true);
@@ -1135,7 +1141,8 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
false);
iphc1 |= LOWPAN_IPHC_DAC;
} else {
- if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL) {
+ if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL &&
+ lowpan_is_linklocal_zero_padded(hdr->daddr)) {
iphc1 |= lowpan_compress_addr_64(&hc_ptr,
&hdr->daddr,
daddr, false);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index c162af5d16bf..d4b3dd5413be 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4727,6 +4727,19 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
u32 flags;
u8 *ptr, real_len;
+ switch (type) {
+ case LE_ADV_IND:
+ case LE_ADV_DIRECT_IND:
+ case LE_ADV_SCAN_IND:
+ case LE_ADV_NONCONN_IND:
+ case LE_ADV_SCAN_RSP:
+ break;
+ default:
+	BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
+			   type);
+ return;
+ }
+
/* Find the end of the data in case the report contains padded zero
* bytes at the end causing an invalid length value.
*
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 6e125d76df0d..c045b3c54768 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -1065,6 +1065,9 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
flags |= LE_AD_LIMITED;
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ flags |= LE_AD_NO_BREDR;
+
if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
/* If a discovery flag wasn't provided, simply use the global
* settings.
@@ -1072,9 +1075,6 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
if (!flags)
flags |= mgmt_get_adv_discov_flags(hdev);
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- flags |= LE_AD_NO_BREDR;
-
/* If flags would still be empty, then there is no need to
* include the "Flags" AD field".
*/
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e4cae72895a7..388ee8b59145 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -778,7 +778,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
}
if (sec.level < BT_SECURITY_LOW ||
- sec.level > BT_SECURITY_HIGH) {
+ sec.level > BT_SECURITY_FIPS) {
err = -EINVAL;
break;
}
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index e23449094188..9cb7044d0801 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -582,7 +582,7 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
int err;
err = switchdev_port_attr_set(br->dev, &attr);
- if (err)
+ if (err && err != -EOPNOTSUPP)
return err;
br->ageing_time = t;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 67b2e27999aa..8570bc7744c2 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1521,6 +1521,8 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
+ tmp.name[sizeof(tmp.name) - 1] = '\0';
+
t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
if (!t)
return ret;
@@ -2332,6 +2334,8 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
+ tmp.name[sizeof(tmp.name) - 1] = '\0';
+
t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
if (!t)
return ret;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index adc8d7221dbb..77f7e7a9ebe1 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -40,7 +40,8 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
* or the bridge port (NF_BRIDGE PREROUTING).
*/
-static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
+static void nft_reject_br_send_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
@@ -48,7 +49,6 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _oth;
- struct net *net = sock_net(oldskb->sk);
if (!nft_bridge_iphdr_validate(oldskb))
return;
@@ -75,7 +75,8 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
br_deliver(br_port_get_rcu(dev), nskb);
}
-static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
+static void nft_reject_br_send_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
@@ -86,7 +87,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
void *payload;
__wsum csum;
u8 proto;
- struct net *net = sock_net(oldskb->sk);
if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
return;
@@ -273,17 +273,17 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
- nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
- pkt->hook,
+ nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
+ pkt->in, pkt->hook,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
- nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
- pkt->hook);
+ nft_reject_br_send_v4_tcp_reset(pkt->net, pkt->skb,
+ pkt->in, pkt->hook);
break;
case NFT_REJECT_ICMPX_UNREACH:
- nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
- pkt->hook,
+ nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
+ pkt->in, pkt->hook,
nft_reject_icmp_code(priv->icmp_code));
break;
}
diff --git a/net/can/raw.c b/net/can/raw.c
index 2e67b1423cd3..972c187d40ab 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -755,7 +755,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err < 0)
goto free_skb;
- sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
skb->dev = dev;
skb->sk = sk;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index bcbec33c6a14..dcc18c6f7cf9 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -361,7 +361,6 @@ ceph_parse_options(char *options, const char *dev_name,
opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
- opt->monc_ping_timeout = CEPH_MONC_PING_TIMEOUT_DEFAULT;
/* get mon ip(s) */
/* ip1[:port1][,ip2[:port2]...] */
@@ -686,6 +685,9 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
return client->auth_err;
}
+ pr_info("client%llu fsid %pU\n", ceph_client_id(client), &client->fsid);
+ ceph_debugfs_client_init(client);
+
return 0;
}
EXPORT_SYMBOL(__ceph_open_session);
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 593dc2eabcc8..b902fbc7863e 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -112,15 +112,20 @@ static int monc_show(struct seq_file *s, void *p)
struct ceph_mon_generic_request *req;
struct ceph_mon_client *monc = &client->monc;
struct rb_node *rp;
+ int i;
mutex_lock(&monc->mutex);
- if (monc->have_mdsmap)
- seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap);
- if (monc->have_osdmap)
- seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap);
- if (monc->want_next_osdmap)
- seq_printf(s, "want next osdmap\n");
+ for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
+ seq_printf(s, "have %s %u", ceph_sub_str[i],
+ monc->subs[i].have);
+ if (monc->subs[i].want)
+ seq_printf(s, " want %llu%s",
+ le64_to_cpu(monc->subs[i].item.start),
+ (monc->subs[i].item.flags &
+ CEPH_SUBSCRIBE_ONETIME ? "" : "+"));
+ seq_putc(s, '\n');
+ }
for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
__u16 op;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9382619a405b..a5502898ea33 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -235,18 +235,12 @@ static struct workqueue_struct *ceph_msgr_wq;
static int ceph_msgr_slab_init(void)
{
BUG_ON(ceph_msg_cache);
- ceph_msg_cache = kmem_cache_create("ceph_msg",
- sizeof (struct ceph_msg),
- __alignof__(struct ceph_msg), 0, NULL);
-
+ ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
if (!ceph_msg_cache)
return -ENOMEM;
BUG_ON(ceph_msg_data_cache);
- ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
- sizeof (struct ceph_msg_data),
- __alignof__(struct ceph_msg_data),
- 0, NULL);
+ ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
if (ceph_msg_data_cache)
return 0;
@@ -275,7 +269,7 @@ static void _ceph_msgr_exit(void)
}
BUG_ON(zero_page == NULL);
- page_cache_release(zero_page);
+ put_page(zero_page);
zero_page = NULL;
ceph_msgr_slab_exit();
@@ -288,7 +282,7 @@ int ceph_msgr_init(void)
BUG_ON(zero_page != NULL);
zero_page = ZERO_PAGE(0);
- page_cache_get(zero_page);
+ get_page(zero_page);
/*
* The number of active work items is limited by the number of
@@ -1221,25 +1215,19 @@ static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
static void prepare_write_message_footer(struct ceph_connection *con)
{
struct ceph_msg *m = con->out_msg;
- int v = con->out_kvec_left;
m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
dout("prepare_write_message_footer %p\n", con);
- con->out_kvec[v].iov_base = &m->footer;
+ con_out_kvec_add(con, sizeof_footer(con), &m->footer);
if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
if (con->ops->sign_message)
con->ops->sign_message(m);
else
m->footer.sig = 0;
- con->out_kvec[v].iov_len = sizeof(m->footer);
- con->out_kvec_bytes += sizeof(m->footer);
} else {
m->old_footer.flags = m->footer.flags;
- con->out_kvec[v].iov_len = sizeof(m->old_footer);
- con->out_kvec_bytes += sizeof(m->old_footer);
}
- con->out_kvec_left++;
con->out_more = m->more_to_follow;
con->out_msg_done = true;
}
@@ -1614,7 +1602,7 @@ static int write_partial_skip(struct ceph_connection *con)
dout("%s %p %d left\n", __func__, con, con->out_skip);
while (con->out_skip > 0) {
- size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+ size_t size = min(con->out_skip, (int) PAGE_SIZE);
ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
if (ret <= 0)
@@ -2409,11 +2397,7 @@ static int read_partial_message(struct ceph_connection *con)
}
/* footer */
- if (need_sign)
- size = sizeof(m->footer);
- else
- size = sizeof(m->old_footer);
-
+ size = sizeof_footer(con);
end += size;
ret = read_partial(con, end, size, &m->footer);
if (ret <= 0)
@@ -3089,10 +3073,7 @@ void ceph_msg_revoke(struct ceph_msg *msg)
con->out_skip += con_out_kvec_skip(con);
} else {
BUG_ON(!msg->data_length);
- if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
- con->out_skip += sizeof(msg->footer);
- else
- con->out_skip += sizeof(msg->old_footer);
+ con->out_skip += sizeof_footer(con);
}
/* data, middle, front */
if (msg->data_length)
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index de85dddc3dc0..cf638c009cfa 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -122,51 +122,91 @@ static void __close_session(struct ceph_mon_client *monc)
ceph_msg_revoke(monc->m_subscribe);
ceph_msg_revoke_incoming(monc->m_subscribe_ack);
ceph_con_close(&monc->con);
- monc->cur_mon = -1;
+
monc->pending_auth = 0;
ceph_auth_reset(monc->auth);
}
/*
- * Open a session with a (new) monitor.
+ * Pick a new monitor at random and set cur_mon. If we are repicking
+ * (i.e. cur_mon is already set), be sure to pick a different one.
*/
-static int __open_session(struct ceph_mon_client *monc)
+static void pick_new_mon(struct ceph_mon_client *monc)
{
- char r;
- int ret;
+ int old_mon = monc->cur_mon;
- if (monc->cur_mon < 0) {
- get_random_bytes(&r, 1);
- monc->cur_mon = r % monc->monmap->num_mon;
- dout("open_session num=%d r=%d -> mon%d\n",
- monc->monmap->num_mon, r, monc->cur_mon);
- monc->sub_sent = 0;
- monc->sub_renew_after = jiffies; /* i.e., expired */
- monc->want_next_osdmap = !!monc->want_next_osdmap;
-
- dout("open_session mon%d opening\n", monc->cur_mon);
- ceph_con_open(&monc->con,
- CEPH_ENTITY_TYPE_MON, monc->cur_mon,
- &monc->monmap->mon_inst[monc->cur_mon].addr);
-
- /* send an initial keepalive to ensure our timestamp is
- * valid by the time we are in an OPENED state */
- ceph_con_keepalive(&monc->con);
-
- /* initiatiate authentication handshake */
- ret = ceph_auth_build_hello(monc->auth,
- monc->m_auth->front.iov_base,
- monc->m_auth->front_alloc_len);
- __send_prepared_auth_request(monc, ret);
+ BUG_ON(monc->monmap->num_mon < 1);
+
+ if (monc->monmap->num_mon == 1) {
+ monc->cur_mon = 0;
} else {
- dout("open_session mon%d already open\n", monc->cur_mon);
+ int max = monc->monmap->num_mon;
+ int o = -1;
+ int n;
+
+ if (monc->cur_mon >= 0) {
+ if (monc->cur_mon < monc->monmap->num_mon)
+ o = monc->cur_mon;
+ if (o >= 0)
+ max--;
+ }
+
+ n = prandom_u32() % max;
+ if (o >= 0 && n >= o)
+ n++;
+
+ monc->cur_mon = n;
}
- return 0;
+
+ dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
+ monc->cur_mon, monc->monmap->num_mon);
+}
+
+/*
+ * Open a session with a new monitor.
+ */
+static void __open_session(struct ceph_mon_client *monc)
+{
+ int ret;
+
+ pick_new_mon(monc);
+
+ monc->hunting = true;
+ if (monc->had_a_connection) {
+ monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
+ if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
+ monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
+ }
+
+ monc->sub_renew_after = jiffies; /* i.e., expired */
+ monc->sub_renew_sent = 0;
+
+ dout("%s opening mon%d\n", __func__, monc->cur_mon);
+ ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
+ &monc->monmap->mon_inst[monc->cur_mon].addr);
+
+ /*
+ * send an initial keepalive to ensure our timestamp is valid
+ * by the time we are in an OPENED state
+ */
+ ceph_con_keepalive(&monc->con);
+
+ /* initiate authentication handshake */
+ ret = ceph_auth_build_hello(monc->auth,
+ monc->m_auth->front.iov_base,
+ monc->m_auth->front_alloc_len);
+ BUG_ON(ret <= 0);
+ __send_prepared_auth_request(monc, ret);
}
-static bool __sub_expired(struct ceph_mon_client *monc)
+static void reopen_session(struct ceph_mon_client *monc)
{
- return time_after_eq(jiffies, monc->sub_renew_after);
+ if (!monc->hunting)
+ pr_info("mon%d %s session lost, hunting for new mon\n",
+ monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr.in_addr));
+
+ __close_session(monc);
+ __open_session(monc);
}
/*
@@ -174,74 +214,70 @@ static bool __sub_expired(struct ceph_mon_client *monc)
*/
static void __schedule_delayed(struct ceph_mon_client *monc)
{
- struct ceph_options *opt = monc->client->options;
unsigned long delay;
- if (monc->cur_mon < 0 || __sub_expired(monc)) {
- delay = 10 * HZ;
- } else {
- delay = 20 * HZ;
- if (opt->monc_ping_timeout > 0)
- delay = min(delay, opt->monc_ping_timeout / 3);
- }
+ if (monc->hunting)
+ delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
+ else
+ delay = CEPH_MONC_PING_INTERVAL;
+
dout("__schedule_delayed after %lu\n", delay);
- schedule_delayed_work(&monc->delayed_work,
- round_jiffies_relative(delay));
+ mod_delayed_work(system_wq, &monc->delayed_work,
+ round_jiffies_relative(delay));
}
+const char *ceph_sub_str[] = {
+ [CEPH_SUB_MDSMAP] = "mdsmap",
+ [CEPH_SUB_MONMAP] = "monmap",
+ [CEPH_SUB_OSDMAP] = "osdmap",
+};
+
/*
- * Send subscribe request for mdsmap and/or osdmap.
+ * Send subscribe request for one or more maps, according to
+ * monc->subs.
*/
static void __send_subscribe(struct ceph_mon_client *monc)
{
- dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
- (unsigned int)monc->sub_sent, __sub_expired(monc),
- monc->want_next_osdmap);
- if ((__sub_expired(monc) && !monc->sub_sent) ||
- monc->want_next_osdmap == 1) {
- struct ceph_msg *msg = monc->m_subscribe;
- struct ceph_mon_subscribe_item *i;
- void *p, *end;
- int num;
-
- p = msg->front.iov_base;
- end = p + msg->front_alloc_len;
-
- num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
- ceph_encode_32(&p, num);
-
- if (monc->want_next_osdmap) {
- dout("__send_subscribe to 'osdmap' %u\n",
- (unsigned int)monc->have_osdmap);
- ceph_encode_string(&p, end, "osdmap", 6);
- i = p;
- i->have = cpu_to_le64(monc->have_osdmap);
- i->onetime = 1;
- p += sizeof(*i);
- monc->want_next_osdmap = 2; /* requested */
- }
- if (monc->want_mdsmap) {
- dout("__send_subscribe to 'mdsmap' %u+\n",
- (unsigned int)monc->have_mdsmap);
- ceph_encode_string(&p, end, "mdsmap", 6);
- i = p;
- i->have = cpu_to_le64(monc->have_mdsmap);
- i->onetime = 0;
- p += sizeof(*i);
- }
- ceph_encode_string(&p, end, "monmap", 6);
- i = p;
- i->have = 0;
- i->onetime = 0;
- p += sizeof(*i);
-
- msg->front.iov_len = p - msg->front.iov_base;
- msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- ceph_msg_revoke(msg);
- ceph_con_send(&monc->con, ceph_msg_get(msg));
-
- monc->sub_sent = jiffies | 1; /* never 0 */
+ struct ceph_msg *msg = monc->m_subscribe;
+ void *p = msg->front.iov_base;
+ void *const end = p + msg->front_alloc_len;
+ int num = 0;
+ int i;
+
+ dout("%s sent %lu\n", __func__, monc->sub_renew_sent);
+
+ BUG_ON(monc->cur_mon < 0);
+
+ if (!monc->sub_renew_sent)
+ monc->sub_renew_sent = jiffies | 1; /* never 0 */
+
+ msg->hdr.version = cpu_to_le16(2);
+
+ for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
+ if (monc->subs[i].want)
+ num++;
}
+ BUG_ON(num < 1); /* monmap sub is always there */
+ ceph_encode_32(&p, num);
+ for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
+ const char *s = ceph_sub_str[i];
+
+ if (!monc->subs[i].want)
+ continue;
+
+ dout("%s %s start %llu flags 0x%x\n", __func__, s,
+ le64_to_cpu(monc->subs[i].item.start),
+ monc->subs[i].item.flags);
+ ceph_encode_string(&p, end, s, strlen(s));
+ memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
+ p += sizeof(monc->subs[i].item);
+ }
+
+ BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19));
+ msg->front.iov_len = p - msg->front.iov_base;
+ msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+ ceph_msg_revoke(msg);
+ ceph_con_send(&monc->con, ceph_msg_get(msg));
}
static void handle_subscribe_ack(struct ceph_mon_client *monc,
@@ -255,15 +291,16 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc,
seconds = le32_to_cpu(h->duration);
mutex_lock(&monc->mutex);
- if (monc->hunting) {
- pr_info("mon%d %s session established\n",
- monc->cur_mon,
- ceph_pr_addr(&monc->con.peer_addr.in_addr));
- monc->hunting = false;
+ if (monc->sub_renew_sent) {
+ monc->sub_renew_after = monc->sub_renew_sent +
+ (seconds >> 1) * HZ - 1;
+ dout("%s sent %lu duration %d renew after %lu\n", __func__,
+ monc->sub_renew_sent, seconds, monc->sub_renew_after);
+ monc->sub_renew_sent = 0;
+ } else {
+ dout("%s sent %lu renew after %lu, ignoring\n", __func__,
+ monc->sub_renew_sent, monc->sub_renew_after);
}
- dout("handle_subscribe_ack after %d seconds\n", seconds);
- monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
- monc->sub_sent = 0;
mutex_unlock(&monc->mutex);
return;
bad:
@@ -272,36 +309,82 @@ bad:
}
/*
- * Keep track of which maps we have
+ * Register interest in a map
+ *
+ * @sub: one of CEPH_SUB_*
+ * @epoch: X for "every map since X", or 0 for "just the latest"
*/
-int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
+static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
+ u32 epoch, bool continuous)
+{
+ __le64 start = cpu_to_le64(epoch);
+ u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;
+
+ dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
+ epoch, continuous);
+
+ if (monc->subs[sub].want &&
+ monc->subs[sub].item.start == start &&
+ monc->subs[sub].item.flags == flags)
+ return false;
+
+ monc->subs[sub].item.start = start;
+ monc->subs[sub].item.flags = flags;
+ monc->subs[sub].want = true;
+
+ return true;
+}
+
+bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
+ bool continuous)
{
+ bool need_request;
+
mutex_lock(&monc->mutex);
- monc->have_mdsmap = got;
+ need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
mutex_unlock(&monc->mutex);
- return 0;
+
+ return need_request;
}
-EXPORT_SYMBOL(ceph_monc_got_mdsmap);
+EXPORT_SYMBOL(ceph_monc_want_map);
-int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
+/*
+ * Keep track of which maps we have
+ *
+ * @sub: one of CEPH_SUB_*
+ */
+static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
+ u32 epoch)
+{
+ dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);
+
+ if (monc->subs[sub].want) {
+ if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
+ monc->subs[sub].want = false;
+ else
+ monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
+ }
+
+ monc->subs[sub].have = epoch;
+}
+
+void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
{
mutex_lock(&monc->mutex);
- monc->have_osdmap = got;
- monc->want_next_osdmap = 0;
+ __ceph_monc_got_map(monc, sub, epoch);
mutex_unlock(&monc->mutex);
- return 0;
}
+EXPORT_SYMBOL(ceph_monc_got_map);
/*
* Register interest in the next osdmap
*/
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
- dout("request_next_osdmap have %u\n", monc->have_osdmap);
+ dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have);
mutex_lock(&monc->mutex);
- if (!monc->want_next_osdmap)
- monc->want_next_osdmap = 1;
- if (monc->want_next_osdmap < 2)
+ if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
+ monc->subs[CEPH_SUB_OSDMAP].have + 1, false))
__send_subscribe(monc);
mutex_unlock(&monc->mutex);
}
@@ -320,15 +403,15 @@ int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
long ret;
mutex_lock(&monc->mutex);
- while (monc->have_osdmap < epoch) {
+ while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
mutex_unlock(&monc->mutex);
if (timeout && time_after_eq(jiffies, started + timeout))
return -ETIMEDOUT;
ret = wait_event_interruptible_timeout(monc->client->auth_wq,
- monc->have_osdmap >= epoch,
- ceph_timeout_jiffies(timeout));
+ monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
+ ceph_timeout_jiffies(timeout));
if (ret < 0)
return ret;
@@ -341,11 +424,14 @@ int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
EXPORT_SYMBOL(ceph_monc_wait_osdmap);
/*
- *
+ * Open a session with a random monitor. Request monmap and osdmap,
+ * which are waited upon in __ceph_open_session().
*/
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
mutex_lock(&monc->mutex);
+ __ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
+ __ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
__open_session(monc);
__schedule_delayed(monc);
mutex_unlock(&monc->mutex);
@@ -353,29 +439,15 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
}
EXPORT_SYMBOL(ceph_monc_open_session);
-/*
- * We require the fsid and global_id in order to initialize our
- * debugfs dir.
- */
-static bool have_debugfs_info(struct ceph_mon_client *monc)
-{
- dout("have_debugfs_info fsid %d globalid %lld\n",
- (int)monc->client->have_fsid, monc->auth->global_id);
- return monc->client->have_fsid && monc->auth->global_id > 0;
-}
-
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
struct ceph_client *client = monc->client;
struct ceph_monmap *monmap = NULL, *old = monc->monmap;
void *p, *end;
- int had_debugfs_info, init_debugfs = 0;
mutex_lock(&monc->mutex);
- had_debugfs_info = have_debugfs_info(monc);
-
dout("handle_monmap\n");
p = msg->front.iov_base;
end = p + msg->front.iov_len;
@@ -395,29 +467,11 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
client->monc.monmap = monmap;
kfree(old);
- if (!client->have_fsid) {
- client->have_fsid = true;
- if (!had_debugfs_info && have_debugfs_info(monc)) {
- pr_info("client%lld fsid %pU\n",
- ceph_client_id(monc->client),
- &monc->client->fsid);
- init_debugfs = 1;
- }
- mutex_unlock(&monc->mutex);
-
- if (init_debugfs) {
- /*
- * do debugfs initialization without mutex to avoid
- * creating a locking dependency
- */
- ceph_debugfs_client_init(monc->client);
- }
+ __ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
+ client->have_fsid = true;
- goto out_unlocked;
- }
out:
mutex_unlock(&monc->mutex);
-out_unlocked:
wake_up_all(&client->auth_wq);
}
@@ -745,18 +799,15 @@ static void delayed_work(struct work_struct *work)
dout("monc delayed_work\n");
mutex_lock(&monc->mutex);
if (monc->hunting) {
- __close_session(monc);
- __open_session(monc); /* continue hunting */
+ dout("%s continuing hunt\n", __func__);
+ reopen_session(monc);
} else {
- struct ceph_options *opt = monc->client->options;
int is_auth = ceph_auth_is_authenticated(monc->auth);
if (ceph_con_keepalive_expired(&monc->con,
- opt->monc_ping_timeout)) {
+ CEPH_MONC_PING_TIMEOUT)) {
dout("monc keepalive timeout\n");
is_auth = 0;
- __close_session(monc);
- monc->hunting = true;
- __open_session(monc);
+ reopen_session(monc);
}
if (!monc->hunting) {
@@ -764,8 +815,14 @@ static void delayed_work(struct work_struct *work)
__validate_auth(monc);
}
- if (is_auth)
- __send_subscribe(monc);
+ if (is_auth) {
+ unsigned long now = jiffies;
+
+ dout("%s renew subs? now %lu renew after %lu\n",
+ __func__, now, monc->sub_renew_after);
+ if (time_after_eq(now, monc->sub_renew_after))
+ __send_subscribe(monc);
+ }
}
__schedule_delayed(monc);
mutex_unlock(&monc->mutex);
@@ -852,18 +909,14 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
&monc->client->msgr);
monc->cur_mon = -1;
- monc->hunting = true;
- monc->sub_renew_after = jiffies;
- monc->sub_sent = 0;
+ monc->had_a_connection = false;
+ monc->hunt_mult = 1;
INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
monc->generic_request_tree = RB_ROOT;
monc->num_generic_requests = 0;
monc->last_tid = 0;
- monc->have_mdsmap = 0;
- monc->have_osdmap = 0;
- monc->want_next_osdmap = 1;
return 0;
out_auth_reply:
@@ -888,7 +941,7 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
mutex_lock(&monc->mutex);
__close_session(monc);
-
+ monc->cur_mon = -1;
mutex_unlock(&monc->mutex);
/*
@@ -910,26 +963,40 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
}
EXPORT_SYMBOL(ceph_monc_stop);
+static void finish_hunting(struct ceph_mon_client *monc)
+{
+ if (monc->hunting) {
+ dout("%s found mon%d\n", __func__, monc->cur_mon);
+ monc->hunting = false;
+ monc->had_a_connection = true;
+ monc->hunt_mult /= 2; /* reduce by 50% */
+ if (monc->hunt_mult < 1)
+ monc->hunt_mult = 1;
+ }
+}
+
static void handle_auth_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
int ret;
int was_auth = 0;
- int had_debugfs_info, init_debugfs = 0;
mutex_lock(&monc->mutex);
- had_debugfs_info = have_debugfs_info(monc);
was_auth = ceph_auth_is_authenticated(monc->auth);
monc->pending_auth = 0;
ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
msg->front.iov_len,
monc->m_auth->front.iov_base,
monc->m_auth->front_alloc_len);
+ if (ret > 0) {
+ __send_prepared_auth_request(monc, ret);
+ goto out;
+ }
+
+ finish_hunting(monc);
+
if (ret < 0) {
monc->client->auth_err = ret;
- wake_up_all(&monc->client->auth_wq);
- } else if (ret > 0) {
- __send_prepared_auth_request(monc, ret);
} else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
dout("authenticated, starting session\n");
@@ -939,23 +1006,15 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
__send_subscribe(monc);
__resend_generic_request(monc);
- }
- if (!had_debugfs_info && have_debugfs_info(monc)) {
- pr_info("client%lld fsid %pU\n",
- ceph_client_id(monc->client),
- &monc->client->fsid);
- init_debugfs = 1;
+ pr_info("mon%d %s session established\n", monc->cur_mon,
+ ceph_pr_addr(&monc->con.peer_addr.in_addr));
}
- mutex_unlock(&monc->mutex);
- if (init_debugfs) {
- /*
- * do debugfs initialization without mutex to avoid
- * creating a locking dependency
- */
- ceph_debugfs_client_init(monc->client);
- }
+out:
+ mutex_unlock(&monc->mutex);
+ if (monc->client->auth_err < 0)
+ wake_up_all(&monc->client->auth_wq);
}
static int __validate_auth(struct ceph_mon_client *monc)
@@ -1096,29 +1155,17 @@ static void mon_fault(struct ceph_connection *con)
{
struct ceph_mon_client *monc = con->private;
- if (!monc)
- return;
-
- dout("mon_fault\n");
mutex_lock(&monc->mutex);
- if (!con->private)
- goto out;
-
- if (!monc->hunting)
- pr_info("mon%d %s session lost, "
- "hunting for new mon\n", monc->cur_mon,
- ceph_pr_addr(&monc->con.peer_addr.in_addr));
-
- __close_session(monc);
- if (!monc->hunting) {
- /* start hunting */
- monc->hunting = true;
- __open_session(monc);
- } else {
- /* already hunting, let's wait a bit */
- __schedule_delayed(monc);
+ dout("%s mon%d\n", __func__, monc->cur_mon);
+ if (monc->cur_mon >= 0) {
+ if (!monc->hunting) {
+ dout("%s hunting for new mon\n", __func__);
+ reopen_session(monc);
+ __schedule_delayed(monc);
+ } else {
+ dout("%s already hunting\n", __func__);
+ }
}
-out:
mutex_unlock(&monc->mutex);
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 5bc053778fed..32355d9d0103 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -338,9 +338,10 @@ static void ceph_osdc_release_request(struct kref *kref)
ceph_put_snap_context(req->r_snapc);
if (req->r_mempool)
mempool_free(req, req->r_osdc->req_mempool);
- else
+ else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
kmem_cache_free(ceph_osd_request_cache, req);
-
+ else
+ kfree(req);
}
void ceph_osdc_get_request(struct ceph_osd_request *req)
@@ -369,28 +370,22 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_msg *msg;
size_t msg_size;
- BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
- BUG_ON(num_ops > CEPH_OSD_MAX_OP);
-
- msg_size = 4 + 4 + 8 + 8 + 4+8;
- msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
- msg_size += 1 + 8 + 4 + 4; /* pg_t */
- msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
- msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
- msg_size += 8; /* snapid */
- msg_size += 8; /* snap_seq */
- msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
- msg_size += 4;
-
if (use_mempool) {
+ BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
req = mempool_alloc(osdc->req_mempool, gfp_flags);
- memset(req, 0, sizeof(*req));
+ } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
+ req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
} else {
- req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags);
+ BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
+ req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
+ gfp_flags);
}
- if (req == NULL)
+ if (unlikely(!req))
return NULL;
+ /* req only, each op is zeroed in _osd_req_op_init() */
+ memset(req, 0, sizeof(*req));
+
req->r_osdc = osdc;
req->r_mempool = use_mempool;
req->r_num_ops = num_ops;
@@ -408,18 +403,36 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
req->r_base_oloc.pool = -1;
req->r_target_oloc.pool = -1;
+ msg_size = OSD_OPREPLY_FRONT_LEN;
+ if (num_ops > CEPH_OSD_SLAB_OPS) {
+ /* ceph_osd_op and rval */
+ msg_size += (num_ops - CEPH_OSD_SLAB_OPS) *
+ (sizeof(struct ceph_osd_op) + 4);
+ }
+
/* create reply message */
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
else
- msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
- OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
+ msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size,
+ gfp_flags, true);
if (!msg) {
ceph_osdc_put_request(req);
return NULL;
}
req->r_reply = msg;
+ msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
+ msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
+ msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
+ msg_size += 1 + 8 + 4 + 4; /* pgid */
+ msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
+ msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
+ msg_size += 8; /* snapid */
+ msg_size += 8; /* snap_seq */
+ msg_size += 4 + 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
+ msg_size += 4; /* retry_attempt */
+
/* create request message; allow space for oid */
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
@@ -498,7 +511,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
payload_len += length;
- op->payload_len = payload_len;
+ op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
@@ -517,10 +530,32 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
BUG_ON(length > previous);
op->extent.length = length;
- op->payload_len -= previous - length;
+ op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
+void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
+ unsigned int which, u64 offset_inc)
+{
+ struct ceph_osd_req_op *op, *prev_op;
+
+ BUG_ON(which + 1 >= osd_req->r_num_ops);
+
+ prev_op = &osd_req->r_ops[which];
+ op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
+ /* dup previous one */
+ op->indata_len = prev_op->indata_len;
+ op->outdata_len = prev_op->outdata_len;
+ op->extent = prev_op->extent;
+ /* adjust offset */
+ op->extent.offset += offset_inc;
+ op->extent.length -= offset_inc;
+
+ if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
+ op->indata_len -= offset_inc;
+}
+EXPORT_SYMBOL(osd_req_op_extent_dup_last);
+
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *class, const char *method)
{
@@ -554,7 +589,7 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
op->cls.argc = 0; /* currently unused */
- op->payload_len = payload_len;
+ op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
@@ -587,7 +622,7 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
op->xattr.cmp_mode = cmp_mode;
ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
- op->payload_len = payload_len;
+ op->indata_len = payload_len;
return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
@@ -707,7 +742,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
dst->cls.indata_len = cpu_to_le32(data_length);
ceph_osdc_msg_data_add(req->r_request, osd_data);
- src->payload_len += data_length;
+ src->indata_len += data_length;
request_data_len += data_length;
}
osd_data = &src->cls.response_data;
@@ -750,7 +785,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
dst->op = cpu_to_le16(src->op);
dst->flags = cpu_to_le32(src->flags);
- dst->payload_len = cpu_to_le32(src->payload_len);
+ dst->payload_len = cpu_to_le32(src->indata_len);
return request_data_len;
}
@@ -1810,7 +1845,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
ceph_decode_need(&p, end, 4, bad_put);
numops = ceph_decode_32(&p);
- if (numops > CEPH_OSD_MAX_OP)
+ if (numops > CEPH_OSD_MAX_OPS)
goto bad_put;
if (numops != req->r_num_ops)
goto bad_put;
@@ -1821,7 +1856,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
int len;
len = le32_to_cpu(op->payload_len);
- req->r_reply_op_len[i] = len;
+ req->r_ops[i].outdata_len = len;
dout(" op %d has %d bytes\n", i, len);
payload_len += len;
p += sizeof(*op);
@@ -1836,7 +1871,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
retry_attempt = ceph_decode_32(&p);
for (i = 0; i < numops; i++)
- req->r_reply_op_result[i] = ceph_decode_32(&p);
+ req->r_ops[i].rval = ceph_decode_32(&p);
if (le16_to_cpu(msg->hdr.version) >= 6) {
p += 8 + 4; /* skip replay_version */
@@ -2187,7 +2222,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
goto bad;
done:
downgrade_write(&osdc->map_sem);
- ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
+ ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
+ osdc->osdmap->epoch);
/*
* subscribe to subsequent osdmap updates if full to ensure
@@ -2646,8 +2682,8 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
round_jiffies_relative(osdc->client->options->osd_idle_ttl));
err = -ENOMEM;
- osdc->req_mempool = mempool_create_kmalloc_pool(10,
- sizeof(struct ceph_osd_request));
+ osdc->req_mempool = mempool_create_slab_pool(10,
+ ceph_osd_request_cache);
if (!osdc->req_mempool)
goto out;
@@ -2782,11 +2818,12 @@ EXPORT_SYMBOL(ceph_osdc_writepages);
int ceph_osdc_setup(void)
{
+ size_t size = sizeof(struct ceph_osd_request) +
+ CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
+
BUG_ON(ceph_osd_request_cache);
- ceph_osd_request_cache = kmem_cache_create("ceph_osd_request",
- sizeof (struct ceph_osd_request),
- __alignof__(struct ceph_osd_request),
- 0, NULL);
+ ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
+ 0, 0, NULL);
return ceph_osd_request_cache ? 0 : -ENOMEM;
}
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index c7c220a736e5..6864007e64fc 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -56,7 +56,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
size_t bit = pl->room;
int ret;
- memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
+ memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
buf, bit);
pl->length += bit;
pl->room -= bit;
@@ -67,7 +67,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
return ret;
}
- memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
+ memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
pl->length += len;
pl->room -= len;
return 0;
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 10297f7a89ba..00d2601407c5 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -95,19 +95,19 @@ int ceph_copy_user_to_page_vector(struct page **pages,
loff_t off, size_t len)
{
int i = 0;
- int po = off & ~PAGE_CACHE_MASK;
+ int po = off & ~PAGE_MASK;
int left = len;
int l, bad;
while (left > 0) {
- l = min_t(int, PAGE_CACHE_SIZE-po, left);
+ l = min_t(int, PAGE_SIZE-po, left);
bad = copy_from_user(page_address(pages[i]) + po, data, l);
if (bad == l)
return -EFAULT;
data += l - bad;
left -= l - bad;
po += l - bad;
- if (po == PAGE_CACHE_SIZE) {
+ if (po == PAGE_SIZE) {
po = 0;
i++;
}
@@ -121,17 +121,17 @@ void ceph_copy_to_page_vector(struct page **pages,
loff_t off, size_t len)
{
int i = 0;
- size_t po = off & ~PAGE_CACHE_MASK;
+ size_t po = off & ~PAGE_MASK;
size_t left = len;
while (left > 0) {
- size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+ size_t l = min_t(size_t, PAGE_SIZE-po, left);
memcpy(page_address(pages[i]) + po, data, l);
data += l;
left -= l;
po += l;
- if (po == PAGE_CACHE_SIZE) {
+ if (po == PAGE_SIZE) {
po = 0;
i++;
}
@@ -144,17 +144,17 @@ void ceph_copy_from_page_vector(struct page **pages,
loff_t off, size_t len)
{
int i = 0;
- size_t po = off & ~PAGE_CACHE_MASK;
+ size_t po = off & ~PAGE_MASK;
size_t left = len;
while (left > 0) {
- size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+ size_t l = min_t(size_t, PAGE_SIZE-po, left);
memcpy(data, page_address(pages[i]) + po, l);
data += l;
left -= l;
po += l;
- if (po == PAGE_CACHE_SIZE) {
+ if (po == PAGE_SIZE) {
po = 0;
i++;
}
@@ -168,25 +168,25 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
*/
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
- int i = off >> PAGE_CACHE_SHIFT;
+ int i = off >> PAGE_SHIFT;
- off &= ~PAGE_CACHE_MASK;
+ off &= ~PAGE_MASK;
dout("zero_page_vector_page %u~%u\n", off, len);
/* leading partial page? */
if (off) {
- int end = min((int)PAGE_CACHE_SIZE, off + len);
+ int end = min((int)PAGE_SIZE, off + len);
dout("zeroing %d %p head from %d\n", i, pages[i],
(int)off);
zero_user_segment(pages[i], off, end);
len -= (end - off);
i++;
}
- while (len >= PAGE_CACHE_SIZE) {
+ while (len >= PAGE_SIZE) {
dout("zeroing %d %p len=%d\n", i, pages[i], len);
- zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
- len -= PAGE_CACHE_SIZE;
+ zero_user_segment(pages[i], 0, PAGE_SIZE);
+ len -= PAGE_SIZE;
i++;
}
/* trailing partial page? */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index fa9dc6450b08..b7de71f8d5d3 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -301,16 +301,19 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_free_datagram);
-void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
+void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
bool slow;
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users)))
+ else if (likely(!atomic_dec_and_test(&skb->users))) {
+ sk_peek_offset_bwd(sk, len);
return;
+ }
slow = lock_sock_fast(sk);
+ sk_peek_offset_bwd(sk, len);
skb_orphan(skb);
sk_mem_reclaim_partial(sk);
unlock_sock_fast(sk, slow);
@@ -318,7 +321,7 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
/* skb is now orphaned, can be freed outside of locked section */
__kfree_skb(skb);
}
-EXPORT_SYMBOL(skb_free_datagram_locked);
+EXPORT_SYMBOL(__skb_free_datagram_locked);
/**
* skb_kill_datagram - Free a datagram skbuff forcibly
diff --git a/net/core/dev.c b/net/core/dev.c
index b9bcbe77d913..d51343a821ed 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4439,6 +4439,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->is_fou = 0;
NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
/* Setup for GRO checksum validation */
@@ -4663,6 +4664,8 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
if (unlikely(skb_gro_header_hard(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
+ net_warn_ratelimited("%s: dropping impossible skb from %s\n",
+ __func__, napi->dev->name);
napi_reuse_skb(napi, skb);
return NULL;
}
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 590fa561cb7f..b84cf0df4a0e 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -119,7 +119,8 @@ static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
return devlink_port_get_from_attrs(devlink, info->attrs);
}
-#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
+#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0)
+#define DEVLINK_NL_FLAG_NEED_PORT BIT(1)
static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
@@ -132,8 +133,9 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
mutex_unlock(&devlink_mutex);
return PTR_ERR(devlink);
}
- info->user_ptr[0] = devlink;
- if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
+ if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
+ info->user_ptr[0] = devlink;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
struct devlink_port *devlink_port;
mutex_lock(&devlink_port_mutex);
@@ -143,7 +145,7 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
mutex_unlock(&devlink_mutex);
return PTR_ERR(devlink_port);
}
- info->user_ptr[1] = devlink_port;
+ info->user_ptr[0] = devlink_port;
}
return 0;
}
@@ -356,8 +358,8 @@ out:
static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink *devlink = devlink_port->devlink;
struct sk_buff *msg;
int err;
@@ -436,8 +438,8 @@ static int devlink_port_type_set(struct devlink *devlink,
static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink *devlink = devlink_port->devlink;
int err;
if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
@@ -511,6 +513,7 @@ static const struct genl_ops devlink_nl_ops[] = {
.doit = devlink_nl_cmd_get_doit,
.dumpit = devlink_nl_cmd_get_dumpit,
.policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
@@ -533,12 +536,14 @@ static const struct genl_ops devlink_nl_ops[] = {
.doit = devlink_nl_cmd_port_split_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
.doit = devlink_nl_cmd_port_unsplit_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
};
@@ -630,7 +635,6 @@ int devlink_port_register(struct devlink *devlink,
}
devlink_port->devlink = devlink;
devlink_port->index = port_index;
- devlink_port->type = DEVLINK_PORT_TYPE_NOTSET;
devlink_port->registered = true;
list_add_tail(&devlink_port->list, &devlink->port_list);
mutex_unlock(&devlink_port_mutex);
diff --git a/net/core/filter.c b/net/core/filter.c
index b7177d01ecb0..e8486ba601ea 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1166,7 +1166,7 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
}
old_fp = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_filter, fp);
if (old_fp)
@@ -1764,6 +1764,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
switch (size) {
case offsetof(struct bpf_tunnel_key, tunnel_label):
+ case offsetof(struct bpf_tunnel_key, tunnel_ext):
goto set_compat;
case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
/* Fixup deprecated structure layouts here, so we have
@@ -1849,6 +1850,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
switch (size) {
case offsetof(struct bpf_tunnel_key, tunnel_label):
+ case offsetof(struct bpf_tunnel_key, tunnel_ext):
case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
/* Fixup deprecated structure layouts here, so we have
* a common path later on.
@@ -1861,7 +1863,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
return -EINVAL;
}
}
- if (unlikely(!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label))
+ if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
+ from->tunnel_ext))
return -EINVAL;
skb_dst_drop(skb);
@@ -2256,7 +2259,7 @@ int sk_detach_filter(struct sock *sk)
return -EPERM;
filter = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (filter) {
RCU_INIT_POINTER(sk->sk_filter, NULL);
sk_filter_uncharge(sk, filter);
@@ -2276,7 +2279,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
lock_sock(sk);
filter = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (!filter)
goto out;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f2066772d0f3..a75f7e94b445 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -909,6 +909,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
+ nla_total_size(1); /* IFLA_PROTO_DOWN */
}
diff --git a/net/core/sock.c b/net/core/sock.c
index b67b9aedb230..e16a5db853c6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -221,7 +221,8 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
- "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
+ "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
+ "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -237,7 +238,8 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
- "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
+ "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
+ "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -253,7 +255,8 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
- "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
+ "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
+ "clock-AF_MAX"
};
/*
@@ -402,9 +405,8 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
}
-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- int err;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
@@ -414,10 +416,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return -ENOMEM;
}
- err = sk_filter(sk, skb);
- if (err)
- return err;
-
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
atomic_inc(&sk->sk_drops);
return -ENOBUFS;
@@ -440,6 +438,18 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk->sk_data_ready(sk);
return 0;
}
+EXPORT_SYMBOL(__sock_queue_rcv_skb);
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+
+ err = sk_filter(sk, skb);
+ if (err)
+ return err;
+
+ return __sock_queue_rcv_skb(sk, skb);
+}
EXPORT_SYMBOL(sock_queue_rcv_skb);
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
@@ -832,7 +842,8 @@ set_rcvbuf:
!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
if (sk->sk_protocol == IPPROTO_TCP &&
sk->sk_type == SOCK_STREAM) {
- if (sk->sk_state != TCP_ESTABLISHED) {
+ if ((1 << sk->sk_state) &
+ (TCPF_CLOSE | TCPF_LISTEN)) {
ret = -EINVAL;
break;
}
@@ -1418,8 +1429,12 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
}
EXPORT_SYMBOL(sk_alloc);
-void sk_destruct(struct sock *sk)
+/* Sockets having SOCK_RCU_FREE will call this function after one RCU
+ * grace period. This is the case for UDP sockets and TCP listeners.
+ */
+static void __sk_destruct(struct rcu_head *head)
{
+ struct sock *sk = container_of(head, struct sock, sk_rcu);
struct sk_filter *filter;
if (sk->sk_destruct)
@@ -1448,6 +1463,14 @@ void sk_destruct(struct sock *sk)
sk_prot_free(sk->sk_prot_creator, sk);
}
+void sk_destruct(struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_RCU_FREE))
+ call_rcu(&sk->sk_rcu, __sk_destruct);
+ else
+ __sk_destruct(&sk->sk_rcu);
+}
+
static void __sk_free(struct sock *sk)
{
if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
@@ -1512,6 +1535,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_dst_cache = NULL;
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
+ atomic_set(&newsk->sk_drops, 0);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
@@ -1866,27 +1890,51 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
}
EXPORT_SYMBOL(sock_alloc_send_skb);
+int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+ struct sockcm_cookie *sockc)
+{
+ u32 tsflags;
+
+ switch (cmsg->cmsg_type) {
+ case SO_MARK:
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+ return -EINVAL;
+ sockc->mark = *(u32 *)CMSG_DATA(cmsg);
+ break;
+ case SO_TIMESTAMPING:
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+ return -EINVAL;
+
+ tsflags = *(u32 *)CMSG_DATA(cmsg);
+ if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
+ return -EINVAL;
+
+ sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
+ sockc->tsflags |= tsflags;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(__sock_cmsg_send);
+
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
struct sockcm_cookie *sockc)
{
struct cmsghdr *cmsg;
+ int ret;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_SOCKET)
continue;
- switch (cmsg->cmsg_type) {
- case SO_MARK:
- if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
- return -EPERM;
- if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
- return -EINVAL;
- sockc->mark = *(u32 *)CMSG_DATA(cmsg);
- break;
- default:
- return -EINVAL;
- }
+ ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -2142,6 +2190,15 @@ void __sk_mem_reclaim(struct sock *sk, int amount)
}
EXPORT_SYMBOL(__sk_mem_reclaim);
+int sk_set_peek_off(struct sock *sk, int val)
+{
+ if (val < 0)
+ return -EINVAL;
+
+ sk->sk_peek_off = val;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sk_set_peek_off);
/*
* Set of default routines for initialising struct proto_ops when
@@ -2429,11 +2486,6 @@ EXPORT_SYMBOL(lock_sock_nested);
void release_sock(struct sock *sk)
{
- /*
- * The sk_lock has mutex_unlock() semantics:
- */
- mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_backlog.tail)
__release_sock(sk);
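The newly exported __sock_cmsg_send() makes SOL_SOCKET ancillary data usable per call rather than only via setsockopt(). A minimal userspace sketch of what it accepts for SO_MARK, assuming standard Linux socket headers that define SO_MARK; send_with_mark() and the mark value are illustrative only, and the kernel still enforces CAP_NET_ADMIN and an exact CMSG_LEN(sizeof(u32)) payload:
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
/* Attach a per-call fwmark as SOL_SOCKET/SO_MARK ancillary data. */
static ssize_t send_with_mark(int fd, const void *buf, size_t len, uint32_t mark)
{
	char cbuf[CMSG_SPACE(sizeof(mark))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	memset(cbuf, 0, sizeof(cbuf));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SO_MARK;
	cmsg->cmsg_len = CMSG_LEN(sizeof(mark));
	memcpy(CMSG_DATA(cmsg), &mark, sizeof(mark));
	return sendmsg(fd, &msg, 0);
}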
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index a996ce8c8fb2..ca9e35bbe13c 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -67,6 +67,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+ mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
return nla_put(skb, attrtype, sizeof(mem), &mem);
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 9c67a961ba53..f6d183f8f332 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -62,7 +62,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
nexthop = daddr = usin->sin_addr.s_addr;
inet_opt = rcu_dereference_protected(inet->inet_opt,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (inet_opt != NULL && inet_opt->opt.srr) {
if (daddr == 0)
return -EINVAL;
@@ -764,6 +764,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
const struct iphdr *iph;
+ bool refcounted;
struct sock *sk;
int min_cov;
@@ -801,7 +802,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
lookup:
sk = __inet_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
- dh->dccph_sport, dh->dccph_dport);
+ dh->dccph_sport, dh->dccph_dport, &refcounted);
if (!sk) {
dccp_pr_debug("failed to look up flow ID in table and "
"get corresponding socket\n");
@@ -830,6 +831,7 @@ lookup:
goto lookup;
}
sock_hold(sk);
+ refcounted = true;
nsk = dccp_check_req(sk, skb, req);
if (!nsk) {
reqsk_put(req);
@@ -886,7 +888,8 @@ discard_it:
return 0;
discard_and_relse:
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
goto discard_it;
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4663a01d5039..8ceb3cebcad4 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -642,6 +642,7 @@ discard:
static int dccp_v6_rcv(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
+ bool refcounted;
struct sock *sk;
int min_cov;
@@ -670,7 +671,7 @@ static int dccp_v6_rcv(struct sk_buff *skb)
lookup:
sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
dh->dccph_sport, dh->dccph_dport,
- inet6_iif(skb));
+ inet6_iif(skb), &refcounted);
if (!sk) {
dccp_pr_debug("failed to look up flow ID in table and "
"get corresponding socket\n");
@@ -699,6 +700,7 @@ lookup:
goto lookup;
}
sock_hold(sk);
+ refcounted = true;
nsk = dccp_check_req(sk, skb, req);
if (!nsk) {
reqsk_put(req);
@@ -752,7 +754,8 @@ discard_it:
return 0;
discard_and_relse:
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
goto discard_it;
}
@@ -865,7 +868,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a575f0350d5a..2dae0d064359 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -104,8 +104,8 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_promisc;
}
- if (ds->drv->port_stp_update)
- ds->drv->port_stp_update(ds, p->port, stp_state);
+ if (ds->drv->port_stp_state_set)
+ ds->drv->port_stp_state_set(ds, p->port, stp_state);
if (p->phy)
phy_start(p->phy);
@@ -147,8 +147,8 @@ static int dsa_slave_close(struct net_device *dev)
if (ds->drv->port_disable)
ds->drv->port_disable(ds, p->port, p->phy);
- if (ds->drv->port_stp_update)
- ds->drv->port_stp_update(ds, p->port, BR_STATE_DISABLED);
+ if (ds->drv->port_stp_state_set)
+ ds->drv->port_stp_state_set(ds, p->port, BR_STATE_DISABLED);
return 0;
}
@@ -207,21 +207,16 @@ static int dsa_slave_port_vlan_add(struct net_device *dev,
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- int err;
if (switchdev_trans_ph_prepare(trans)) {
if (!ds->drv->port_vlan_prepare || !ds->drv->port_vlan_add)
return -EOPNOTSUPP;
- err = ds->drv->port_vlan_prepare(ds, p->port, vlan, trans);
- if (err)
- return err;
- } else {
- err = ds->drv->port_vlan_add(ds, p->port, vlan, trans);
- if (err)
- return err;
+ return ds->drv->port_vlan_prepare(ds, p->port, vlan, trans);
}
+ ds->drv->port_vlan_add(ds, p->port, vlan, trans);
+
return 0;
}
@@ -256,17 +251,17 @@ static int dsa_slave_port_fdb_add(struct net_device *dev,
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- int ret;
- if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add)
- return -EOPNOTSUPP;
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add)
+ return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- ret = ds->drv->port_fdb_prepare(ds, p->port, fdb, trans);
- else
- ret = ds->drv->port_fdb_add(ds, p->port, fdb, trans);
+ return ds->drv->port_fdb_prepare(ds, p->port, fdb, trans);
+ }
- return ret;
+ ds->drv->port_fdb_add(ds, p->port, fdb, trans);
+
+ return 0;
}
static int dsa_slave_port_fdb_del(struct net_device *dev,
@@ -305,16 +300,19 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-static int dsa_slave_stp_update(struct net_device *dev, u8 state)
+static int dsa_slave_stp_state_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- int ret = -EOPNOTSUPP;
- if (ds->drv->port_stp_update)
- ret = ds->drv->port_stp_update(ds, p->port, state);
+ if (switchdev_trans_ph_prepare(trans))
+ return ds->drv->port_stp_state_set ? 0 : -EOPNOTSUPP;
- return ret;
+ ds->drv->port_stp_state_set(ds, p->port, attr->u.stp_state);
+
+ return 0;
}
static int dsa_slave_vlan_filtering(struct net_device *dev,
@@ -339,17 +337,11 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
int ret;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- if (switchdev_trans_ph_prepare(trans))
- ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
- else
- ret = ds->drv->port_stp_update(ds, p->port,
- attr->u.stp_state);
+ ret = dsa_slave_stp_state_set(dev, attr, trans);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
ret = dsa_slave_vlan_filtering(dev, attr, trans);
@@ -468,7 +460,8 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev)
/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
* so allow it to be in BR_STATE_FORWARDING to be kept functional
*/
- dsa_slave_stp_update(dev, BR_STATE_FORWARDING);
+ if (ds->drv->port_stp_state_set)
+ ds->drv->port_stp_state_set(ds, p->port, BR_STATE_FORWARDING);
}
static int dsa_slave_port_attr_get(struct net_device *dev,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9e481992dbae..8217cd22f921 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -948,6 +948,7 @@ const struct proto_ops inet_dgram_ops = {
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
+ .set_peek_off = sk_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
@@ -1106,7 +1107,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference_protected(inet->inet_opt,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
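With .set_peek_off wired to sk_set_peek_off for inet_dgram_ops, SO_PEEK_OFF is now honoured on IPv4 datagram sockets. A minimal sketch, assuming a UDP socket descriptor and Linux headers that define SO_PEEK_OFF; once set, MSG_PEEK reads advance the peek offset instead of re-reading the same bytes:
#include <sys/socket.h>
/* Enable the peek offset on a UDP socket; sk_set_peek_off() rejects
 * negative values, so start peeking from offset 0. */
static int enable_peek_off(int udp_fd)
{
	int off = 0;
	return setsockopt(udp_fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
}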
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index bdb2a07ec363..40d6b87713a1 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1933,7 +1933,8 @@ int cipso_v4_sock_setattr(struct sock *sk,
sk_inet = inet_sk(sk);
- old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));
+ old = rcu_dereference_protected(sk_inet->inet_opt,
+ lockdep_sock_is_held(sk));
if (sk_inet->is_icsk) {
sk_conn = inet_csk(sk);
if (old)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d97268e8ff10..ab64d9f2eef9 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1559,21 +1559,45 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
+static bool fib_good_nh(const struct fib_nh *nh)
+{
+ int state = NUD_REACHABLE;
+
+ if (nh->nh_scope == RT_SCOPE_LINK) {
+ struct neighbour *n;
+
+ rcu_read_lock_bh();
+
+ n = __ipv4_neigh_lookup_noref(nh->nh_dev, nh->nh_gw);
+ if (n)
+ state = n->nud_state;
+
+ rcu_read_unlock_bh();
+ }
+
+ return !!(state & NUD_VALID);
+}
void fib_select_multipath(struct fib_result *res, int hash)
{
struct fib_info *fi = res->fi;
+ struct net *net = fi->fib_net;
+ bool first = false;
for_nexthops(fi) {
if (hash > atomic_read(&nh->nh_upper_bound))
continue;
- res->nh_sel = nhsel;
- return;
+ if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
+ fib_good_nh(nh)) {
+ res->nh_sel = nhsel;
+ return;
+ }
+ if (!first) {
+ res->nh_sel = nhsel;
+ first = true;
+ }
} endfor_nexthops(fi);
-
- /* Race condition: route has just become dead. */
- res->nh_sel = 0;
}
#endif
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index a0586b4a197d..d039f8fff57f 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -22,7 +22,6 @@ struct fou {
u8 flags;
__be16 port;
u16 type;
- struct udp_offload udp_offloads;
struct list_head list;
struct rcu_head rcu;
};
@@ -186,15 +185,26 @@ drop:
return 0;
}
-static struct sk_buff **fou_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
+static struct sk_buff **fou_gro_receive(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
{
const struct net_offload *ops;
struct sk_buff **pp = NULL;
- u8 proto = NAPI_GRO_CB(skb)->proto;
+ u8 proto = fou_from_sock(sk)->protocol;
const struct net_offload **offloads;
+ /* We can clear the encap_mark for FOU as we are essentially doing
+ * one of two possible things. We are either adding an L4 tunnel
+ * header to the outer L3 tunnel header, or we are are simply
+ * treating the GRE tunnel header as though it is a UDP protocol
+ * specific header such as VXLAN or GENEVE.
+ */
+ NAPI_GRO_CB(skb)->encap_mark = 0;
+
+ /* Flag this frame as already having an outer encap header */
+ NAPI_GRO_CB(skb)->is_fou = 1;
+
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
@@ -209,11 +219,11 @@ out_unlock:
return pp;
}
-static int fou_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
+static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
+ int nhoff)
{
const struct net_offload *ops;
- u8 proto = NAPI_GRO_CB(skb)->proto;
+ u8 proto = fou_from_sock(sk)->protocol;
int err = -ENOSYS;
const struct net_offload **offloads;
@@ -256,9 +266,9 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
return guehdr;
}
-static struct sk_buff **gue_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
+static struct sk_buff **gue_gro_receive(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
{
const struct net_offload **offloads;
const struct net_offload *ops;
@@ -269,7 +279,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
void *data;
u16 doffset = 0;
int flush = 1;
- struct fou *fou = container_of(uoff, struct fou, udp_offloads);
+ struct fou *fou = fou_from_sock(sk);
struct gro_remcsum grc;
skb_gro_remcsum_init(&grc);
@@ -352,6 +362,17 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
}
}
+ /* We can clear the encap_mark for GUE as we are essentially doing
+ * one of two possible things. We are either adding an L4 tunnel
+ * header to the outer L3 tunnel header, or we are are simply
+ * treating the GRE tunnel header as though it is a UDP protocol
+ * specific header such as VXLAN or GENEVE.
+ */
+ NAPI_GRO_CB(skb)->encap_mark = 0;
+
+ /* Flag this frame as already having an outer encap header */
+ NAPI_GRO_CB(skb)->is_fou = 1;
+
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[guehdr->proto_ctype]);
@@ -370,8 +391,7 @@ out:
return pp;
}
-static int gue_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
+static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
const struct net_offload **offloads;
struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
@@ -419,10 +439,7 @@ static int fou_add_to_port_list(struct net *net, struct fou *fou)
static void fou_release(struct fou *fou)
{
struct socket *sock = fou->sock;
- struct sock *sk = sock->sk;
- if (sk->sk_family == AF_INET)
- udp_del_offload(&fou->udp_offloads);
list_del(&fou->list);
udp_tunnel_sock_release(sock);
@@ -432,11 +449,9 @@ static void fou_release(struct fou *fou)
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
udp_sk(sk)->encap_rcv = fou_udp_recv;
- fou->protocol = cfg->protocol;
- fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
- fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
- fou->udp_offloads.port = cfg->udp_config.local_udp_port;
- fou->udp_offloads.ipproto = cfg->protocol;
+ udp_sk(sk)->gro_receive = fou_gro_receive;
+ udp_sk(sk)->gro_complete = fou_gro_complete;
+ fou_from_sock(sk)->protocol = cfg->protocol;
return 0;
}
@@ -444,9 +459,8 @@ static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
udp_sk(sk)->encap_rcv = gue_udp_recv;
- fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
- fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
- fou->udp_offloads.port = cfg->udp_config.local_udp_port;
+ udp_sk(sk)->gro_receive = gue_gro_receive;
+ udp_sk(sk)->gro_complete = gue_gro_complete;
return 0;
}
@@ -505,12 +519,6 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
sk->sk_allocation = GFP_ATOMIC;
- if (cfg->udp_config.family == AF_INET) {
- err = udp_add_offload(net, &fou->udp_offloads);
- if (err)
- goto error;
- }
-
err = fou_add_to_port_list(net, fou);
if (err)
goto error;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index c47539d04b88..6a5bd4317866 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -150,6 +150,14 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
goto out;
+ /* We can only support GRE_CSUM if we can track the location of
+ * the GRE header. In the case of FOU/GUE we cannot because the
+ * outer UDP header displaces the GRE header leaving us in a state
+ * of limbo.
+ */
+ if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
+ goto out;
+
type = greh->protocol;
rcu_read_lock();
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bc5196ea1bdf..ab69da2d2a77 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -661,6 +661,9 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
newsk->sk_write_space = sk_stream_write_space;
+ /* listeners have SOCK_RCU_FREE, not the children */
+ sock_reset_flag(newsk, SOCK_RCU_FREE);
+
newsk->sk_mark = inet_rsk(req)->ir_mark;
atomic64_set(&newsk->sk_cookie,
atomic64_read(&inet_rsk(req)->ir_cookie));
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 5fdb02f5598e..bd591eb81ec9 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -356,6 +356,7 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
{
struct sock *sk;
+ rcu_read_lock();
if (req->sdiag_family == AF_INET)
sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
@@ -376,9 +377,11 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
req->id.idiag_if);
}
#endif
- else
+ else {
+ rcu_read_unlock();
return ERR_PTR(-EINVAL);
-
+ }
+ rcu_read_unlock();
if (!sk)
return ERR_PTR(-ENOENT);
@@ -772,13 +775,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct inet_listen_hashbucket *ilb;
- struct hlist_nulls_node *node;
struct sock *sk;
num = 0;
ilb = &hashinfo->listening_hash[i];
spin_lock_bh(&ilb->lock);
- sk_nulls_for_each(sk, node, &ilb->head) {
+ sk_for_each(sk, &ilb->head) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index bc68eced0105..fcadb670f50b 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -198,13 +198,13 @@ static inline int compute_score(struct sock *sk, struct net *net,
}
/*
- * Don't inline this cruft. Here are some nice properties to exploit here. The
- * BSD API does not allow a listening sock to specify the remote port nor the
+ * Here are some nice properties to exploit here. The BSD API
+ * does not allow a listening sock to specify the remote port nor the
* remote address for the connection. So always assume those are both
* wildcarded during the search since they can never be otherwise.
*/
-
+/* called with rcu_read_lock() : No refcount taken on the socket */
struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -212,38 +212,27 @@ struct sock *__inet_lookup_listener(struct net *net,
const __be32 daddr, const unsigned short hnum,
const int dif)
{
- struct sock *sk, *result;
- struct hlist_nulls_node *node;
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
- int score, hiscore, matches = 0, reuseport = 0;
- bool select_ok = true;
+ int score, hiscore = 0, matches = 0, reuseport = 0;
+ struct sock *sk, *result = NULL;
u32 phash = 0;
- rcu_read_lock();
-begin:
- result = NULL;
- hiscore = 0;
- sk_nulls_for_each_rcu(sk, node, &ilb->head) {
+ sk_for_each_rcu(sk, &ilb->head) {
score = compute_score(sk, net, hnum, daddr, dif);
if (score > hiscore) {
- result = sk;
- hiscore = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
phash = inet_ehashfn(net, daddr, hnum,
saddr, sport);
- if (select_ok) {
- struct sock *sk2;
- sk2 = reuseport_select_sock(sk, phash,
- skb, doff);
- if (sk2) {
- result = sk2;
- goto found;
- }
- }
+ result = reuseport_select_sock(sk, phash,
+ skb, doff);
+ if (result)
+ return result;
matches = 1;
}
+ result = sk;
+ hiscore = score;
} else if (score == hiscore && reuseport) {
matches++;
if (reciprocal_scale(phash, matches) == 0)
@@ -251,25 +240,6 @@ begin:
phash = next_pseudo_random32(phash);
}
}
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
- goto begin;
- if (result) {
-found:
- if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
- result = NULL;
- else if (unlikely(compute_score(result, net, hnum, daddr,
- dif) < hiscore)) {
- sock_put(result);
- select_ok = false;
- goto begin;
- }
- }
- rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
@@ -312,7 +282,6 @@ struct sock *__inet_lookup_established(struct net *net,
unsigned int slot = hash & hashinfo->ehash_mask;
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
- rcu_read_lock();
begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash)
@@ -339,7 +308,6 @@ begin:
out:
sk = NULL;
found:
- rcu_read_unlock();
return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
@@ -471,10 +439,9 @@ static int inet_reuseport_add_sock(struct sock *sk,
bool match_wildcard))
{
struct sock *sk2;
- struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
- sk_nulls_for_each_rcu(sk2, node, &ilb->head) {
+ sk_for_each_rcu(sk2, &ilb->head) {
if (sk2 != sk &&
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -512,7 +479,8 @@ int __inet_hash(struct sock *sk, struct sock *osk,
if (err)
goto unlock;
}
- __sk_nulls_add_node_rcu(sk, &ilb->head);
+ hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
spin_unlock(&ilb->lock);
@@ -539,20 +507,25 @@ void inet_unhash(struct sock *sk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
spinlock_t *lock;
+ bool listener = false;
int done;
if (sk_unhashed(sk))
return;
- if (sk->sk_state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN) {
lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
- else
+ listener = true;
+ } else {
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
-
+ }
spin_lock_bh(lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
- done = __sk_nulls_del_node_init_rcu(sk);
+ if (listener)
+ done = __sk_del_node_init(sk);
+ else
+ done = __sk_nulls_del_node_init_rcu(sk);
if (done)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);
@@ -688,9 +661,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
- INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
- i + LISTENING_NULLS_BASE);
- }
+ INIT_HLIST_HEAD(&h->listening_hash[i].head);
+ }
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 31936d387cfd..af5d1f38217f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -862,9 +862,16 @@ static void __gre_tunnel_init(struct net_device *dev)
dev->hw_features |= GRE_FEATURES;
if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
- /* TCP offload with GRE SEQ is not supported. */
- dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ /* TCP offload with GRE SEQ is not supported, nor
+ * can we support 2 levels of outer headers requiring
+ * an update.
+ */
+ if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
+ (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ }
+
/* Can use a lockless transmit, unless we generate
* output sequences
*/
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 035ad645a8d9..89b5f3bd6694 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -219,11 +219,12 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
bool allow_ipv6)
{
int err, val;
struct cmsghdr *cmsg;
+ struct net *net = sock_net(sk);
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
@@ -244,6 +245,12 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
continue;
}
#endif
+ if (cmsg->cmsg_level == SOL_SOCKET) {
+ if (__sock_cmsg_send(sk, msg, cmsg, &ipc->sockc))
+ return -EINVAL;
+ continue;
+ }
+
if (cmsg->cmsg_level != SOL_IP)
continue;
switch (cmsg->cmsg_type) {
@@ -635,7 +642,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
if (err)
break;
old = rcu_dereference_protected(inet->inet_opt,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (inet->is_icsk) {
struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
@@ -1295,7 +1302,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference_protected(inet->inet_opt,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
opt->optlen = 0;
if (inet_opt)
memcpy(optbuf, &inet_opt->opt,
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 02dd990af542..43445df61efd 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -86,15 +86,15 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
- bool xnet)
+int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+ __be16 inner_proto, bool raw_proto, bool xnet)
{
if (unlikely(!pskb_may_pull(skb, hdr_len)))
return -ENOMEM;
skb_pull_rcsum(skb, hdr_len);
- if (inner_proto == htons(ETH_P_TEB)) {
+ if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
struct ethhdr *eh;
if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
@@ -117,7 +117,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
return iptunnel_pull_offloads(skb);
}
-EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags)
@@ -247,10 +247,10 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);
if (tb[LWTUNNEL_IP_DST])
- tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
+ tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);
if (tb[LWTUNNEL_IP_SRC])
- tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
+ tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);
if (tb[LWTUNNEL_IP_TTL])
tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
@@ -275,8 +275,8 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
- nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
- nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+ nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+ nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
@@ -372,8 +372,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
- nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
- nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
+ nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
+ nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
return -ENOMEM;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index bf081927e06b..4133b0f513af 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -359,11 +359,12 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
/* All zeroes == unconditional rule. */
-static inline bool unconditional(const struct arpt_arp *arp)
+static inline bool unconditional(const struct arpt_entry *e)
{
static const struct arpt_arp uncond;
- return memcmp(arp, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct arpt_entry) &&
+ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}
/* Figures out from what hook each rule can be called: returns 0 if
@@ -402,11 +403,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct arpt_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 && unconditional(&e->arp)) ||
- visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -474,14 +474,12 @@ next:
return 1;
}
-static inline int check_entry(const struct arpt_entry *e, const char *name)
+static inline int check_entry(const struct arpt_entry *e)
{
const struct xt_entry_target *t;
- if (!arp_checkentry(&e->arp)) {
- duprintf("arp_tables: arp check failed %p %s.\n", e, name);
+ if (!arp_checkentry(&e->arp))
return -EINVAL;
- }
if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
return -EINVAL;
@@ -522,10 +520,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
struct xt_target *target;
int ret;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -557,7 +551,7 @@ static bool check_underflow(const struct arpt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->arp))
+ if (!unconditional(e))
return false;
t = arpt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -576,9 +570,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -590,6 +586,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
return -EINVAL;
}
+ err = check_entry(e);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -598,9 +598,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -969,6 +969,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
sizeof(struct arpt_get_entries) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR_OR_NULL(t)) {
@@ -1233,7 +1234,8 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1246,7 +1248,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct arpt_entry *)e, name);
+ ret = check_entry((struct arpt_entry *)e);
if (ret)
return ret;
@@ -1662,6 +1664,7 @@ static int compat_get_entries(struct net *net,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(NFPROTO_ARP);
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e53f8d6f326d..631c100a1338 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ipt_ip *ip)
+static inline bool unconditional(const struct ipt_entry *e)
{
static const struct ipt_ip uncond;
- return memcmp(ip, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct ipt_entry) &&
+ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
} else if (s == e) {
(*rulenum)++;
- if (s->target_offset == sizeof(struct ipt_entry) &&
+ if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
- t->verdict < 0 &&
- unconditional(&s->ip)) {
+ t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -476,11 +476,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct ipt_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 && unconditional(&e->ip)) ||
- visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -569,14 +568,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
}
static int
-check_entry(const struct ipt_entry *e, const char *name)
+check_entry(const struct ipt_entry *e)
{
const struct xt_entry_target *t;
- if (!ip_checkentry(&e->ip)) {
- duprintf("ip check failed %p %s.\n", e, name);
+ if (!ip_checkentry(&e->ip))
return -EINVAL;
- }
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
@@ -666,10 +663,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -721,7 +714,7 @@ static bool check_underflow(const struct ipt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->ip))
+ if (!unconditional(e))
return false;
t = ipt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -741,9 +734,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -755,6 +750,10 @@ check_entry_size_and_hooks(struct ipt_entry *e,
return -EINVAL;
}
+ err = check_entry(e);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -763,9 +762,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1157,6 +1156,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR_OR_NULL(t)) {
@@ -1493,7 +1493,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1506,7 +1507,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct ipt_entry *)e, name);
+ ret = check_entry((struct ipt_entry *)e);
if (ret)
return ret;
@@ -1935,6 +1936,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 7b8fbb352877..db5b87509446 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -18,10 +18,10 @@
#include <net/netfilter/nf_conntrack_synproxy.h>
static struct iphdr *
-synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
+ __be32 daddr)
{
struct iphdr *iph;
- struct net *net = sock_net(skb->sk);
skb_reset_network_header(skb);
iph = (struct iphdr *)skb_put(skb, sizeof(*iph));
@@ -40,14 +40,12 @@ synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
}
static void
-synproxy_send_tcp(const struct synproxy_net *snet,
+synproxy_send_tcp(struct net *net,
const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct iphdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
- struct net *net = nf_ct_net(snet->tmpl);
-
nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (unsigned char *)nth - nskb->head;
@@ -72,7 +70,7 @@ free_nskb:
}
static void
-synproxy_send_client_synack(const struct synproxy_net *snet,
+synproxy_send_client_synack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
@@ -91,7 +89,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
return;
skb_reserve(nskb, MAX_TCP_HEADER);
- niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+ niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -109,15 +107,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
niph, nth, tcp_hdr_size);
}
static void
-synproxy_send_server_syn(const struct synproxy_net *snet,
+synproxy_send_server_syn(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts, u32 recv_seq)
{
+ struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
@@ -132,7 +131,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
return;
skb_reserve(nskb, MAX_TCP_HEADER);
- niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+ niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -153,12 +152,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
niph, nth, tcp_hdr_size);
}
static void
-synproxy_send_server_ack(const struct synproxy_net *snet,
+synproxy_send_server_ack(struct net *net,
const struct ip_ct_tcp *state,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
@@ -177,7 +176,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
return;
skb_reserve(nskb, MAX_TCP_HEADER);
- niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+ niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -193,11 +192,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
}
static void
-synproxy_send_client_ack(const struct synproxy_net *snet,
+synproxy_send_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
@@ -215,7 +214,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
return;
skb_reserve(nskb, MAX_TCP_HEADER);
- niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+ niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -231,15 +230,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
niph, nth, tcp_hdr_size);
}
static bool
-synproxy_recv_client_ack(const struct synproxy_net *snet,
+synproxy_recv_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq)
{
+ struct synproxy_net *snet = synproxy_pernet(net);
int mss;
mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
@@ -255,7 +255,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet,
if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy_check_timestamp_cookie(opts);
- synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+ synproxy_send_server_syn(net, skb, th, opts, recv_seq);
return true;
}
@@ -263,7 +263,8 @@ static unsigned int
synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_synproxy_info *info = par->targinfo;
- struct synproxy_net *snet = synproxy_pernet(par->net);
+ struct net *net = par->net;
+ struct synproxy_net *snet = synproxy_pernet(net);
struct synproxy_options opts = {};
struct tcphdr *th, _th;
@@ -292,12 +293,12 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
XT_SYNPROXY_OPT_SACK_PERM |
XT_SYNPROXY_OPT_ECN);
- synproxy_send_client_synack(snet, skb, th, &opts);
+ synproxy_send_client_synack(net, skb, th, &opts);
return NF_DROP;
} else if (th->ack && !(th->fin || th->rst || th->syn)) {
/* ACK from client */
- synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+ synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq));
return NF_DROP;
}
@@ -308,7 +309,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *nhs)
{
- struct synproxy_net *snet = synproxy_pernet(nhs->net);
+ struct net *net = nhs->net;
+ struct synproxy_net *snet = synproxy_pernet(net);
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
struct nf_conn_synproxy *synproxy;
@@ -365,7 +367,7 @@ static unsigned int ipv4_synproxy_hook(void *priv,
* therefore we need to add 1 to make the SYN sequence
* number match the one of first SYN.
*/
- if (synproxy_recv_client_ack(snet, skb, th, &opts,
+ if (synproxy_recv_client_ack(net, skb, th, &opts,
ntohl(th->seq) + 1))
this_cpu_inc(snet->stats->cookie_retrans);
@@ -391,12 +393,12 @@ static unsigned int ipv4_synproxy_hook(void *priv,
XT_SYNPROXY_OPT_SACK_PERM);
swap(opts.tsval, opts.tsecr);
- synproxy_send_server_ack(snet, state, skb, th, &opts);
+ synproxy_send_server_ack(net, state, skb, th, &opts);
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
swap(opts.tsval, opts.tsecr);
- synproxy_send_client_ack(snet, skb, th, &opts);
+ synproxy_send_client_ack(net, skb, th, &opts);
consume_skb(skb);
return NF_STOLEN;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index cf9700b1a106..66ddcb60519a 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -737,6 +737,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
/* no remote port */
}
+ ipc.sockc.tsflags = sk->sk_tsflags;
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.oif = sk->sk_bound_dev_if;
@@ -744,10 +745,8 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.ttl = 0;
ipc.tos = -1;
- sock_tx_timestamp(sk, &ipc.tx_flags);
-
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+ err = ip_cmsg_send(sk, msg, &ipc, false);
if (unlikely(err)) {
kfree(ipc.opt);
return err;
@@ -768,6 +767,8 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rcu_read_unlock();
}
+ sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
+
saddr = ipc.addr;
ipc.addr = faddr = daddr;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8d22de74080c..438f50c1a676 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -339,8 +339,8 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
struct msghdr *msg, size_t length,
- struct rtable **rtp,
- unsigned int flags)
+ struct rtable **rtp, unsigned int flags,
+ const struct sockcm_cookie *sockc)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
@@ -379,7 +379,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->ip_summed = CHECKSUM_NONE;
- sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
skb->transport_header = skb->network_header;
err = -EFAULT;
@@ -540,6 +540,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
daddr = inet->inet_daddr;
}
+ ipc.sockc.tsflags = sk->sk_tsflags;
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
@@ -548,7 +549,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
- err = ip_cmsg_send(net, msg, &ipc, false);
+ err = ip_cmsg_send(sk, msg, &ipc, false);
if (unlikely(err)) {
kfree(ipc.opt);
goto out;
@@ -638,10 +639,10 @@ back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, &fl4, msg, len,
- &rt, msg->msg_flags);
+ &rt, msg->msg_flags, &ipc.sockc);
else {
- sock_tx_timestamp(sk, &ipc.tx_flags);
+ sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
if (!ipc.addr)
ipc.addr = fl4.daddr;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1e1fe6086dd9..bb0419582b8d 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -960,6 +960,17 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ {
+ .procname = "fib_multipath_use_neigh",
+ .data = &init_net.ipv4.sysctl_fib_multipath_use_neigh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
{ }
};
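The new knob can be flipped at runtime through procfs. A small sketch, with the path taken from the table entry above and proc_dointvec_minmax clamping the value to 0 or 1; the helper name and error handling are illustrative only:
#include <stdio.h>
/* Turn on neighbour-state-aware multipath next-hop selection. */
static int enable_fib_multipath_use_neigh(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/fib_multipath_use_neigh", "w");
	if (!f)
		return -1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}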
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 08b8b960a8ed..4d73858991af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -428,14 +428,16 @@ void tcp_init_sock(struct sock *sk)
}
EXPORT_SYMBOL(tcp_init_sock);
-static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
+static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
{
- if (sk->sk_tsflags) {
+ if (sk->sk_tsflags || tsflags) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
- sock_tx_timestamp(sk, &shinfo->tx_flags);
+ sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
+ tcb->txstamp_ack = !!(shinfo->tx_flags & SKBTX_ACK_TSTAMP);
}
}
@@ -957,7 +959,7 @@ new_segment:
offset += copy;
size -= copy;
if (!size) {
- tcp_tx_timestamp(sk, skb);
+ tcp_tx_timestamp(sk, sk->sk_tsflags, skb);
goto out;
}
@@ -1077,6 +1079,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
+ struct sockcm_cookie sockc;
int flags, err, copied = 0;
int mss_now = 0, size_goal, copied_syn = 0;
bool sg;
@@ -1119,6 +1122,15 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
/* 'common' sending to sendq */
}
+ sockc.tsflags = sk->sk_tsflags;
+ if (msg->msg_controllen) {
+ err = sock_cmsg_send(sk, msg, &sockc);
+ if (unlikely(err)) {
+ err = -EINVAL;
+ goto out_err;
+ }
+ }
+
/* This should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -1237,7 +1249,7 @@ new_segment:
copied += copy;
if (!msg_data_left(msg)) {
- tcp_tx_timestamp(sk, skb);
+ tcp_tx_timestamp(sk, sockc.tsflags, skb);
goto out;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f79ade8..983f04c11177 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2252,16 +2252,6 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
}
}
-/* CWND moderation, preventing bursts due to too big ACKs
- * in dubious situations.
- */
-static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
-{
- tp->snd_cwnd = min(tp->snd_cwnd,
- tcp_packets_in_flight(tp) + tcp_max_burst(tp));
- tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
{
return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2410,7 +2400,6 @@ static bool tcp_try_undo_recovery(struct sock *sk)
/* Hold old state until something *above* high_seq
* is ACKed. For Reno it is MUST to prevent false
* fast retransmits (RFC2582). SACK TCP is safe. */
- tcp_moderate_cwnd(tp);
if (!tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
return true;
@@ -3093,7 +3082,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
const struct skb_shared_info *shinfo;
/* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
- if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)))
+ if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
return;
shinfo = skb_shinfo(skb);
@@ -4318,6 +4307,12 @@ static bool tcp_try_coalesce(struct sock *sk,
return true;
}
+static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ __kfree_skb(skb);
+}
+
/* This one checks to see if we can put data from the
* out_of_order queue into the receive_queue.
*/
@@ -4342,7 +4337,7 @@ static void tcp_ofo_queue(struct sock *sk)
__skb_unlink(skb, &tp->out_of_order_queue);
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
continue;
}
SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
@@ -4394,7 +4389,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return;
}
@@ -4458,7 +4453,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
/* All the bits are present. Drop. */
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
skb = NULL;
tcp_dsack_set(sk, seq, end_seq);
goto add_sack;
@@ -4497,7 +4492,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb1);
+ tcp_drop(sk, skb1);
}
add_sack:
@@ -4580,12 +4575,13 @@ err:
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- int eaten = -1;
bool fragstolen = false;
+ int eaten = -1;
- if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
- goto drop;
-
+ if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+ __kfree_skb(skb);
+ return;
+ }
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
@@ -4667,7 +4663,7 @@ out_of_window:
tcp_enter_quickack_mode(sk);
inet_csk_schedule_ack(sk);
drop:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return;
}
@@ -5244,7 +5240,7 @@ syn_challenge:
return true;
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return false;
}
@@ -5462,7 +5458,7 @@ csum_error:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
}
EXPORT_SYMBOL(tcp_rcv_established);
@@ -5693,7 +5689,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
TCP_DELACK_MAX, TCP_RTO_MAX);
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return 0;
} else {
tcp_send_ack(sk);
@@ -6054,7 +6050,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (!queued) {
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
}
return 0;
}
@@ -6343,8 +6339,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
af_ops->send_synack(sk, dst, &fl, req,
&foc, !want_cookie);
- if (want_cookie)
- goto drop_and_free;
+ if (want_cookie) {
+ reqsk_free(req);
+ return 0;
+ }
}
reqsk_put(req);
return 0;
@@ -6354,7 +6352,7 @@ drop_and_release:
drop_and_free:
reqsk_free(req);
drop:
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ tcp_listendrop(sk);
return 0;
}
EXPORT_SYMBOL(tcp_conn_request);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad450509029b..f4f2a0a3849d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -157,7 +157,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
nexthop = daddr = usin->sin_addr.s_addr;
inet_opt = rcu_dereference_protected(inet->inet_opt,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (inet_opt && inet_opt->opt.srr) {
if (!daddr)
return -EINVAL;
@@ -329,7 +329,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
* errors returned from accept().
*/
inet_csk_reqsk_queue_drop(req->rsk_listener, req);
- NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
+ tcp_listendrop(req->rsk_listener);
}
reqsk_put(req);
}
@@ -628,6 +628,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
+ rcu_read_lock();
hash_location = tcp_parse_md5sig_option(th);
if (sk && sk_fullsock(sk)) {
key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
@@ -646,16 +647,18 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
ntohs(th->source), inet_iif(skb));
/* don't send rst if it can't find key */
if (!sk1)
- return;
- rcu_read_lock();
+ goto out;
+
key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
&ip_hdr(skb)->saddr, AF_INET);
if (!key)
- goto release_sk1;
+ goto out;
+
genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0)
- goto release_sk1;
+ goto out;
+
}
if (key) {
@@ -698,11 +701,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
#ifdef CONFIG_TCP_MD5SIG
-release_sk1:
- if (sk1) {
- rcu_read_unlock();
- sock_put(sk1);
- }
+out:
+ rcu_read_unlock();
#endif
}
@@ -882,8 +882,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
/* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info,
- sock_owned_by_user(sk) ||
- lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
+ lockdep_sock_is_held(sk));
if (!md5sig)
return NULL;
#if IS_ENABLED(CONFIG_IPV6)
@@ -928,8 +927,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
}
md5sig = rcu_dereference_protected(tp->md5sig_info,
- sock_owned_by_user(sk) ||
- lockdep_is_held(&sk->sk_lock.slock));
+ lockdep_sock_is_held(sk));
if (!md5sig) {
md5sig = kmalloc(sizeof(*md5sig), gfp);
if (!md5sig)
@@ -1246,7 +1244,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
&tcp_request_sock_ipv4_ops, sk, skb);
drop:
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ tcp_listendrop(sk);
return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -1348,7 +1346,7 @@ exit_overflow:
exit_nonewsk:
dst_release(dst);
exit:
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ tcp_listendrop(sk);
return NULL;
put_and_exit:
inet_csk_prepare_forced_close(newsk);
@@ -1538,11 +1536,12 @@ EXPORT_SYMBOL(tcp_prequeue);
int tcp_v4_rcv(struct sk_buff *skb)
{
+ struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct tcphdr *th;
+ bool refcounted;
struct sock *sk;
int ret;
- struct net *net = dev_net(skb->dev);
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
@@ -1588,7 +1587,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
lookup:
sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
- th->dest);
+ th->dest, &refcounted);
if (!sk)
goto no_tcp_socket;
@@ -1609,7 +1608,11 @@ process:
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ /* We own a reference on the listener, increase it again
+ * as we might lose it too soon.
+ */
sock_hold(sk);
+ refcounted = true;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
@@ -1665,7 +1668,8 @@ process:
bh_unlock_sock(sk);
put_and_return:
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
return ret;
@@ -1688,7 +1692,9 @@ discard_it:
return 0;
discard_and_relse:
- sock_put(sk);
+ sk_drops_add(sk, skb);
+ if (refcounted)
+ sock_put(sk);
goto discard_it;
do_time_wait:
@@ -1712,6 +1718,7 @@ do_time_wait:
if (sk2) {
inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
+ refcounted = false;
goto process;
}
/* Fall through to ACK */
@@ -1845,17 +1852,17 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
*/
static void *listening_get_next(struct seq_file *seq, void *cur)
{
- struct inet_connection_sock *icsk;
- struct hlist_nulls_node *node;
- struct sock *sk = cur;
- struct inet_listen_hashbucket *ilb;
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
+ struct inet_listen_hashbucket *ilb;
+ struct inet_connection_sock *icsk;
+ struct sock *sk = cur;
if (!sk) {
+get_head:
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock_bh(&ilb->lock);
- sk = sk_nulls_head(&ilb->head);
+ sk = sk_head(&ilb->head);
st->offset = 0;
goto get_sk;
}
@@ -1863,28 +1870,20 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
++st->num;
++st->offset;
- sk = sk_nulls_next(sk);
+ sk = sk_next(sk);
get_sk:
- sk_nulls_for_each_from(sk, node) {
+ sk_for_each_from(sk) {
if (!net_eq(sock_net(sk), net))
continue;
- if (sk->sk_family == st->family) {
- cur = sk;
- goto out;
- }
+ if (sk->sk_family == st->family)
+ return sk;
icsk = inet_csk(sk);
}
spin_unlock_bh(&ilb->lock);
st->offset = 0;
- if (++st->bucket < INET_LHTABLE_SIZE) {
- ilb = &tcp_hashinfo.listening_hash[st->bucket];
- spin_lock_bh(&ilb->lock);
- sk = sk_nulls_head(&ilb->head);
- goto get_sk;
- }
- cur = NULL;
-out:
- return cur;
+ if (++st->bucket < INET_LHTABLE_SIZE)
+ goto get_head;
+ return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
@@ -2383,6 +2382,7 @@ static int __net_init tcp_sk_init(struct net *net)
IPPROTO_TCP, net);
if (res)
goto fail;
+ sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
}
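
tcp_v4_rcv() above now obtains a refcounted flag from __inet_lookup_skb(): with the listening hash switched from an hlist_nulls to a plain RCU hlist, established sockets are still returned with a reference held, while listeners may come back unreferenced and are only guaranteed to stay valid inside the caller's RCU section. Pulling the scattered hunks together, the calling convention is roughly this (a sketch, not a verbatim copy of the final function):

	bool refcounted;
	struct sock *sk;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
			       th->source, th->dest, &refcounted);
	if (!sk)
		goto no_tcp_socket;

	/* ... validate and queue the segment ... */

	if (refcounted)
		sock_put(sk);	/* drop the reference only if the lookup took one */

The sock_hold()/refcounted = true pair in the request-socket path and the refcounted = false assignment after the time-wait re-lookup keep that flag truthful across the gotos.
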
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index acb366dd61e6..4c53e7c86586 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -704,7 +704,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
/* Out of window: send ACK and drop. */
- if (!(flg & TCP_FLAG_RST))
+ if (!(flg & TCP_FLAG_RST) &&
+ !tcp_oow_rate_limited(sock_net(sk), skb,
+ LINUX_MIB_TCPACKSKIPPEDSYNRECV,
+ &tcp_rsk(req)->last_oow_ack_time))
req->rsk_ops->send_ack(sk, skb, req);
if (paws_reject)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e16df0..3563788d064f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -143,10 +143,9 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
unsigned int log)
{
struct sock *sk2;
- struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
- sk_nulls_for_each(sk2, node, &hslot->head) {
+ sk_for_each(sk2, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(bitmap || udp_sk(sk2)->udp_port_hash == num) &&
@@ -177,12 +176,11 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
bool match_wildcard))
{
struct sock *sk2;
- struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
- udp_portaddr_for_each_entry(sk2, node, &hslot2->head) {
+ udp_portaddr_for_each_entry(sk2, &hslot2->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(udp_sk(sk2)->udp_port_hash == num) &&
@@ -207,11 +205,10 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
bool match_wildcard))
{
struct net *net = sock_net(sk);
- struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
struct sock *sk2;
- sk_nulls_for_each(sk2, node, &hslot->head) {
+ sk_for_each(sk2, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
sk2->sk_family == sk->sk_family &&
@@ -333,17 +330,18 @@ found:
goto fail_unlock;
}
- sk_nulls_add_node_rcu(sk, &hslot->head);
+ sk_add_node_rcu(sk, &hslot->head);
hslot->count++;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock(&hslot2->lock);
- hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+ hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&hslot2->head);
hslot2->count++;
spin_unlock(&hslot2->lock);
}
+ sock_set_flag(sk, SOCK_RCU_FREE);
error = 0;
fail_unlock:
spin_unlock_bh(&hslot->lock);
@@ -497,37 +495,27 @@ static struct sock *udp4_lib_lookup2(struct net *net,
struct sk_buff *skb)
{
struct sock *sk, *result;
- struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
- bool select_ok = true;
u32 hash = 0;
-begin:
result = NULL;
badness = 0;
- udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
- result = sk;
- badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
- if (select_ok) {
- struct sock *sk2;
-
- sk2 = reuseport_select_sock(sk, hash, skb,
+ result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
- if (sk2) {
- result = sk2;
- select_ok = false;
- goto found;
- }
- }
+ if (result)
+ return result;
matches = 1;
}
+ badness = score;
+ result = sk;
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
@@ -535,23 +523,6 @@ begin:
hash = next_pseudo_random32(hash);
}
}
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != slot2)
- goto begin;
- if (result) {
-found:
- if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
- result = NULL;
- else if (unlikely(compute_score2(result, net, saddr, sport,
- daddr, hnum, dif) < badness)) {
- sock_put(result);
- goto begin;
- }
- }
return result;
}
@@ -563,15 +534,12 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
int dif, struct udp_table *udptable, struct sk_buff *skb)
{
struct sock *sk, *result;
- struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
- bool select_ok = true;
u32 hash = 0;
- rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp4_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
@@ -593,35 +561,27 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
htonl(INADDR_ANY), hnum, dif,
hslot2, slot2, skb);
}
- rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = 0;
- sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+ sk_for_each_rcu(sk, &hslot->head) {
score = compute_score(sk, net, saddr, hnum, sport,
daddr, dport, dif);
if (score > badness) {
- result = sk;
- badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
- if (select_ok) {
- struct sock *sk2;
-
- sk2 = reuseport_select_sock(sk, hash, skb,
+ result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
- if (sk2) {
- result = sk2;
- select_ok = false;
- goto found;
- }
- }
+ if (result)
+ return result;
matches = 1;
}
+ result = sk;
+ badness = score;
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
@@ -629,25 +589,6 @@ begin:
hash = next_pseudo_random32(hash);
}
}
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != slot)
- goto begin;
-
- if (result) {
-found:
- if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
- result = NULL;
- else if (unlikely(compute_score(result, net, saddr, hnum, sport,
- daddr, dport, dif) < badness)) {
- sock_put(result);
- goto begin;
- }
- }
- rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
@@ -663,13 +604,37 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
udptable, skb);
}
+struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ const struct net_device *dev =
+ skb_dst(skb) ? skb_dst(skb)->dev : skb->dev;
+
+ return __udp4_lib_lookup(dev_net(dev), iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb),
+ &udp_table, skb);
+}
+EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
+
+/* Must be called under rcu_read_lock().
+ * Does increment socket refcount.
+ */
+#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
+ IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
- return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif,
- &udp_table, NULL);
+ struct sock *sk;
+
+ sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ dif, &udp_table, NULL);
+ if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+ return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
+#endif
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, __be32 loc_addr,
@@ -771,7 +736,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
sk->sk_err = err;
sk->sk_error_report(sk);
out:
- sock_put(sk);
+ return;
}
void udp_err(struct sk_buff *skb, u32 info)
@@ -1027,15 +992,13 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
*/
connected = 1;
}
- ipc.addr = inet->inet_saddr;
+ ipc.sockc.tsflags = sk->sk_tsflags;
+ ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
- sock_tx_timestamp(sk, &ipc.tx_flags);
-
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc,
- sk->sk_family == AF_INET6);
+ err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
if (unlikely(err)) {
kfree(ipc.opt);
return err;
@@ -1060,6 +1023,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
saddr = ipc.addr;
ipc.addr = faddr = daddr;
+ sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
+
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr)
return -EINVAL;
@@ -1342,7 +1307,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct sk_buff *skb;
unsigned int ulen, copied;
- int peeked, off = 0;
+ int peeked, peeking, off;
int err;
int is_udplite = IS_UDPLITE(sk);
bool checksum_valid = false;
@@ -1352,15 +1317,16 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
return ip_recv_error(sk, msg, len, addr_len);
try_again:
+ peeking = off = sk_peek_offset(sk, flags);
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
- goto out;
+ return err;
- ulen = skb->len - sizeof(struct udphdr);
+ ulen = skb->len;
copied = len;
- if (copied > ulen)
- copied = ulen;
+ if (copied > ulen - off)
+ copied = ulen - off;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
@@ -1370,18 +1336,16 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) {
checksum_valid = !udp_lib_checksum_complete(skb);
if (!checksum_valid)
goto csum_copy_err;
}
if (checksum_valid || skb_csum_unnecessary(skb))
- err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
- msg, copied);
+ err = skb_copy_datagram_msg(skb, off, msg, copied);
else {
- err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
- msg);
+ err = skb_copy_and_csum_datagram_msg(skb, off, msg);
if (err == -EINVAL)
goto csum_copy_err;
@@ -1394,7 +1358,8 @@ try_again:
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
- goto out_free;
+ skb_free_datagram_locked(sk, skb);
+ return err;
}
if (!peeked)
@@ -1418,9 +1383,7 @@ try_again:
if (flags & MSG_TRUNC)
err = ulen;
-out_free:
- skb_free_datagram_locked(sk, skb);
-out:
+ __skb_free_datagram_locked(sk, skb, peeking ? -err : err);
return err;
csum_copy_err:
@@ -1474,13 +1437,13 @@ void udp_lib_unhash(struct sock *sk)
spin_lock_bh(&hslot->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
- if (sk_nulls_del_node_init_rcu(sk)) {
+ if (sk_del_node_init_rcu(sk)) {
hslot->count--;
inet_sk(sk)->inet_num = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_lock(&hslot2->lock);
- hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+ hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
}
@@ -1513,12 +1476,12 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
if (hslot2 != nhslot2) {
spin_lock(&hslot2->lock);
- hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+ hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
spin_lock(&nhslot2->lock);
- hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+ hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&nhslot2->head);
nhslot2->count++;
spin_unlock(&nhslot2->lock);
@@ -1548,7 +1511,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk_incoming_cpu_update(sk);
}
- rc = sock_queue_rcv_skb(sk, skb);
+ rc = __sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
@@ -1664,10 +1627,14 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
}
- if (rcu_access_pointer(sk->sk_filter) &&
- udp_lib_checksum_complete(skb))
- goto csum_error;
+ if (rcu_access_pointer(sk->sk_filter)) {
+ if (udp_lib_checksum_complete(skb))
+ goto csum_error;
+ if (sk_filter(sk, skb))
+ goto drop;
+ }
+ udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
@@ -1697,35 +1664,6 @@ drop:
return -1;
}
-static void flush_stack(struct sock **stack, unsigned int count,
- struct sk_buff *skb, unsigned int final)
-{
- unsigned int i;
- struct sk_buff *skb1 = NULL;
- struct sock *sk;
-
- for (i = 0; i < count; i++) {
- sk = stack[i];
- if (likely(!skb1))
- skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-
- if (!skb1) {
- atomic_inc(&sk->sk_drops);
- UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
- IS_UDPLITE(sk));
- UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
- IS_UDPLITE(sk));
- }
-
- if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
- skb1 = NULL;
-
- sock_put(sk);
- }
- if (unlikely(skb1))
- kfree_skb(skb1);
-}
-
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
@@ -1749,14 +1687,14 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udp_table *udptable,
int proto)
{
- struct sock *sk, *stack[256 / sizeof(struct sock *)];
- struct hlist_nulls_node *node;
+ struct sock *sk, *first = NULL;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
- int dif = skb->dev->ifindex;
- unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
- bool inner_flushed = false;
+ unsigned int offset = offsetof(typeof(*sk), sk_node);
+ int dif = skb->dev->ifindex;
+ struct hlist_node *node;
+ struct sk_buff *nskb;
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
@@ -1767,23 +1705,28 @@ start_lookup:
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
- spin_lock(&hslot->lock);
- sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
- if (__udp_is_mcast_sock(net, sk,
- uh->dest, daddr,
- uh->source, saddr,
- dif, hnum)) {
- if (unlikely(count == ARRAY_SIZE(stack))) {
- flush_stack(stack, count, skb, ~0);
- inner_flushed = true;
- count = 0;
- }
- stack[count++] = sk;
- sock_hold(sk);
+ sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+ if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
+ uh->source, saddr, dif, hnum))
+ continue;
+
+ if (!first) {
+ first = sk;
+ continue;
}
- }
+ nskb = skb_clone(skb, GFP_ATOMIC);
- spin_unlock(&hslot->lock);
+ if (unlikely(!nskb)) {
+ atomic_inc(&sk->sk_drops);
+ UDP_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
+ IS_UDPLITE(sk));
+ UDP_INC_STATS_BH(net, UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+ continue;
+ }
+ if (udp_queue_rcv_skb(sk, nskb) > 0)
+ consume_skb(nskb);
+ }
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
@@ -1791,16 +1734,13 @@ start_lookup:
goto start_lookup;
}
- /*
- * do the slow work with no lock held
- */
- if (count) {
- flush_stack(stack, count, skb, count - 1);
+ if (first) {
+ if (udp_queue_rcv_skb(first, skb) > 0)
+ consume_skb(skb);
} else {
- if (!inner_flushed)
- UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
- proto == IPPROTO_UDPLITE);
- consume_skb(skb);
+ kfree_skb(skb);
+ UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+ proto == IPPROTO_UDPLITE);
}
return 0;
}
@@ -1897,7 +1837,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
inet_compute_pseudo);
ret = udp_queue_rcv_skb(sk, skb);
- sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
@@ -1958,49 +1897,24 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
int dif)
{
struct sock *sk, *result;
- struct hlist_nulls_node *node;
unsigned short hnum = ntohs(loc_port);
- unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
+ unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
struct udp_hslot *hslot = &udp_table.hash[slot];
/* Do not bother scanning a too big list */
if (hslot->count > 10)
return NULL;
- rcu_read_lock();
-begin:
- count = 0;
result = NULL;
- sk_nulls_for_each_rcu(sk, node, &hslot->head) {
- if (__udp_is_mcast_sock(net, sk,
- loc_port, loc_addr,
- rmt_port, rmt_addr,
- dif, hnum)) {
+ sk_for_each_rcu(sk, &hslot->head) {
+ if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
+ rmt_port, rmt_addr, dif, hnum)) {
+ if (result)
+ return NULL;
result = sk;
- ++count;
- }
- }
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != slot)
- goto begin;
-
- if (result) {
- if (count != 1 ||
- unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
- result = NULL;
- else if (unlikely(!__udp_is_mcast_sock(net, result,
- loc_port, loc_addr,
- rmt_port, rmt_addr,
- dif, hnum))) {
- sock_put(result);
- result = NULL;
}
}
- rcu_read_unlock();
+
return result;
}
@@ -2013,37 +1927,22 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
- struct sock *sk, *result;
- struct hlist_nulls_node *node;
unsigned short hnum = ntohs(loc_port);
unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
unsigned int slot2 = hash2 & udp_table.mask;
struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
+ struct sock *sk;
- rcu_read_lock();
- result = NULL;
- udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
- if (INET_MATCH(sk, net, acookie,
- rmt_addr, loc_addr, ports, dif))
- result = sk;
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+ if (INET_MATCH(sk, net, acookie, rmt_addr,
+ loc_addr, ports, dif))
+ return sk;
/* Only check first socket in chain */
break;
}
-
- if (result) {
- if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
- result = NULL;
- else if (unlikely(!INET_MATCH(sk, net, acookie,
- rmt_addr, loc_addr,
- ports, dif))) {
- sock_put(result);
- result = NULL;
- }
- }
- rcu_read_unlock();
- return result;
+ return NULL;
}
void udp_v4_early_demux(struct sk_buff *skb)
@@ -2051,7 +1950,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct udphdr *uh;
- struct sock *sk;
+ struct sock *sk = NULL;
struct dst_entry *dst;
int dif = skb->dev->ifindex;
int ours;
@@ -2083,11 +1982,9 @@ void udp_v4_early_demux(struct sk_buff *skb)
} else if (skb->pkt_type == PACKET_HOST) {
sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
- } else {
- return;
}
- if (!sk)
+ if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
return;
skb->sk = sk;
@@ -2387,14 +2284,13 @@ static struct sock *udp_get_first(struct seq_file *seq, int start)
for (state->bucket = start; state->bucket <= state->udp_table->mask;
++state->bucket) {
- struct hlist_nulls_node *node;
struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
- if (hlist_nulls_empty(&hslot->head))
+ if (hlist_empty(&hslot->head))
continue;
spin_lock_bh(&hslot->lock);
- sk_nulls_for_each(sk, node, &hslot->head) {
+ sk_for_each(sk, &hslot->head) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == state->family)
@@ -2413,7 +2309,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
struct net *net = seq_file_net(seq);
do {
- sk = sk_nulls_next(sk);
+ sk = sk_next(sk);
} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
if (!sk) {
@@ -2622,12 +2518,12 @@ void __init udp_table_init(struct udp_table *table, const char *name)
table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
- INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
+ INIT_HLIST_HEAD(&table->hash[i].head);
table->hash[i].count = 0;
spin_lock_init(&table->hash[i].lock);
}
for (i = 0; i <= table->mask; i++) {
- INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
+ INIT_HLIST_HEAD(&table->hash2[i].head);
table->hash2[i].count = 0;
spin_lock_init(&table->hash2[i].lock);
}
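
The udp.c conversion above retires the nulls/SLAB_DESTROY_BY_RCU lookup protocol: UDP sockets are flagged SOCK_RCU_FREE at bind time so they are freed only after a grace period, __udp4_lib_lookup() runs under the caller's rcu_read_lock() and returns an unreferenced pointer, and only users that keep the socket beyond the RCU section pin it. A sketch of that caller-side pattern, mirroring the guarded udp4_lib_lookup() wrapper:

	struct sock *sk;

	rcu_read_lock();
	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	/* the socket may already be draining its last reference */
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	rcu_read_unlock();
	/* a non-NULL sk now holds a reference and needs sock_put() when done */
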
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index df1966f3b6ec..3d5ccf4b1412 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -36,10 +36,11 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req)
{
int err = -EINVAL;
- struct sock *sk;
+ struct sock *sk = NULL;
struct sk_buff *rep;
struct net *net = sock_net(in_skb->sk);
+ rcu_read_lock();
if (req->sdiag_family == AF_INET)
sk = __udp4_lib_lookup(net,
req->id.idiag_src[0], req->id.idiag_sport,
@@ -54,9 +55,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
req->id.idiag_dport,
req->id.idiag_if, tbl, NULL);
#endif
- else
- goto out_nosk;
-
+ if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+ rcu_read_unlock();
err = -ENOENT;
if (!sk)
goto out_nosk;
@@ -96,24 +97,23 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
- int num, s_num, slot, s_slot;
struct net *net = sock_net(skb->sk);
+ int num, s_num, slot, s_slot;
s_slot = cb->args[0];
num = s_num = cb->args[1];
for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
- struct sock *sk;
- struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];
+ struct sock *sk;
num = 0;
- if (hlist_nulls_empty(&hslot->head))
+ if (hlist_empty(&hslot->head))
continue;
spin_lock_bh(&hslot->lock);
- sk_nulls_for_each(sk, node, &hslot->head) {
+ sk_for_each(sk, &hslot->head) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0ed2dafb7cc4..6230cf4b0d2d 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -14,18 +14,6 @@
#include <net/udp.h>
#include <net/protocol.h>
-static DEFINE_SPINLOCK(udp_offload_lock);
-static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
-
-#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
-
-struct udp_offload_priv {
- struct udp_offload *offload;
- possible_net_t net;
- struct rcu_head rcu;
- struct udp_offload_priv __rcu *next;
-};
-
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
@@ -179,6 +167,7 @@ out_unlock:
return segs;
}
+EXPORT_SYMBOL(skb_udp_tunnel_segment);
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
@@ -253,64 +242,14 @@ out:
return segs;
}
-int udp_add_offload(struct net *net, struct udp_offload *uo)
-{
- struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
-
- if (!new_offload)
- return -ENOMEM;
-
- write_pnet(&new_offload->net, net);
- new_offload->offload = uo;
-
- spin_lock(&udp_offload_lock);
- new_offload->next = udp_offload_base;
- rcu_assign_pointer(udp_offload_base, new_offload);
- spin_unlock(&udp_offload_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(udp_add_offload);
-
-static void udp_offload_free_routine(struct rcu_head *head)
-{
- struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
- kfree(ou_priv);
-}
-
-void udp_del_offload(struct udp_offload *uo)
-{
- struct udp_offload_priv __rcu **head = &udp_offload_base;
- struct udp_offload_priv *uo_priv;
-
- spin_lock(&udp_offload_lock);
-
- uo_priv = udp_deref_protected(*head);
- for (; uo_priv != NULL;
- uo_priv = udp_deref_protected(*head)) {
- if (uo_priv->offload == uo) {
- rcu_assign_pointer(*head,
- udp_deref_protected(uo_priv->next));
- goto unlock;
- }
- head = &uo_priv->next;
- }
- pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
-unlock:
- spin_unlock(&udp_offload_lock);
- if (uo_priv)
- call_rcu(&uo_priv->rcu, udp_offload_free_routine);
-}
-EXPORT_SYMBOL(udp_del_offload);
-
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
- struct udphdr *uh)
+ struct udphdr *uh, udp_lookup_t lookup)
{
- struct udp_offload_priv *uo_priv;
struct sk_buff *p, **pp = NULL;
struct udphdr *uh2;
unsigned int off = skb_gro_offset(skb);
int flush = 1;
+ struct sock *sk;
if (NAPI_GRO_CB(skb)->encap_mark ||
(skb->ip_summed != CHECKSUM_PARTIAL &&
@@ -322,13 +261,10 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
NAPI_GRO_CB(skb)->encap_mark = 1;
rcu_read_lock();
- uo_priv = rcu_dereference(udp_offload_base);
- for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
- if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
- uo_priv->offload->port == uh->dest &&
- uo_priv->offload->callbacks.gro_receive)
- goto unflush;
- }
+ sk = (*lookup)(skb, uh->source, uh->dest);
+
+ if (sk && udp_sk(sk)->gro_receive)
+ goto unflush;
goto out_unlock;
unflush:
@@ -352,9 +288,7 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
- NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
- pp = uo_priv->offload->callbacks.gro_receive(head, skb,
- uo_priv->offload);
+ pp = udp_sk(sk)->gro_receive(sk, head, skb);
out_unlock:
rcu_read_unlock();
@@ -362,6 +296,7 @@ out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
+EXPORT_SYMBOL(udp_gro_receive);
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
@@ -383,39 +318,28 @@ static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
inet_gro_compute_pseudo);
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 0;
- return udp_gro_receive(head, skb, uh);
+ return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
flush:
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
-int udp_gro_complete(struct sk_buff *skb, int nhoff)
+int udp_gro_complete(struct sk_buff *skb, int nhoff,
+ udp_lookup_t lookup)
{
- struct udp_offload_priv *uo_priv;
__be16 newlen = htons(skb->len - nhoff);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
int err = -ENOSYS;
+ struct sock *sk;
uh->len = newlen;
rcu_read_lock();
-
- uo_priv = rcu_dereference(udp_offload_base);
- for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
- if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
- uo_priv->offload->port == uh->dest &&
- uo_priv->offload->callbacks.gro_complete)
- break;
- }
-
- if (uo_priv) {
- NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
- err = uo_priv->offload->callbacks.gro_complete(skb,
- nhoff + sizeof(struct udphdr),
- uo_priv->offload);
- }
-
+ sk = (*lookup)(skb, uh->source, uh->dest);
+ if (sk && udp_sk(sk)->gro_complete)
+ err = udp_sk(sk)->gro_complete(sk, skb,
+ nhoff + sizeof(struct udphdr));
rcu_read_unlock();
if (skb->remcsum_offload)
@@ -426,6 +350,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
return err;
}
+EXPORT_SYMBOL(udp_gro_complete);
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
@@ -440,7 +365,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
}
- return udp_gro_complete(skb, nhoff);
+ return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
static const struct net_offload udpv4_offload = {
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 96599d1a1318..47f12c73d959 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -69,6 +69,8 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
udp_sk(sk)->encap_type = cfg->encap_type;
udp_sk(sk)->encap_rcv = cfg->encap_rcv;
udp_sk(sk)->encap_destroy = cfg->encap_destroy;
+ udp_sk(sk)->gro_receive = cfg->gro_receive;
+ udp_sk(sk)->gro_complete = cfg->gro_complete;
udp_tunnel_encap_enable(sock);
}
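
With the global udp_offload list removed, udp_gro_receive() and udp_gro_complete() above dispatch on the receiving socket: the supplied udp_lookup_t finds it and udp_sk(sk)->gro_receive / gro_complete are invoked. Tunnel drivers provide those callbacks when they set up their UDP socket; a hedged sketch of the configuration side, where the my_tunnel_* handlers are hypothetical names:

	struct udp_tunnel_sock_cfg cfg = { };

	cfg.sk_user_data = my_tunnel;		/* driver-private state */
	cfg.encap_type   = 1;
	cfg.encap_rcv    = my_tunnel_encap_rcv;
	cfg.gro_receive  = my_tunnel_gro_receive;
	cfg.gro_complete = my_tunnel_gro_complete;
	setup_udp_tunnel_sock(net, sock, &cfg);
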
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2fbd90bf8d33..5e9d6bf4aaca 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -8,9 +8,10 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
addrlabel.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
- exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
+ exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \
+ udp_offload.o
-ipv6-offload := ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
+ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o
ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b11c37cfd67c..bfa86f040c16 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -64,6 +64,8 @@
#include <asm/uaccess.h>
#include <linux/mroute6.h>
+#include "ip6_offload.h"
+
MODULE_AUTHOR("Cast of dozens");
MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
MODULE_LICENSE("GPL");
@@ -561,6 +563,7 @@ const struct proto_ops inet6_dgram_ops = {
.recvmsg = inet_recvmsg, /* ok */
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
+ .set_peek_off = sk_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
@@ -958,6 +961,10 @@ static int __init inet6_init(void)
if (err)
goto udplitev6_fail;
+ err = udpv6_offload_init();
+ if (err)
+ goto udpv6_offload_fail;
+
err = tcpv6_init();
if (err)
goto tcpv6_fail;
@@ -987,6 +994,8 @@ pingv6_fail:
ipv6_packet_fail:
tcpv6_exit();
tcpv6_fail:
+ udpv6_offload_exit();
+udpv6_offload_fail:
udplitev6_exit();
udplitev6_fail:
udpv6_exit();
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 428162155280..a73d70119fcd 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -685,7 +685,8 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
struct msghdr *msg, struct flowi6 *fl6,
struct ipv6_txoptions *opt,
- int *hlimit, int *tclass, int *dontfrag)
+ int *hlimit, int *tclass, int *dontfrag,
+ struct sockcm_cookie *sockc)
{
struct in6_pktinfo *src_info;
struct cmsghdr *cmsg;
@@ -702,6 +703,12 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
goto exit_f;
}
+ if (cmsg->cmsg_level == SOL_SOCKET) {
+ if (__sock_cmsg_send(sk, msg, cmsg, sockc))
+ return -EINVAL;
+ continue;
+ }
+
if (cmsg->cmsg_level != SOL_IPV6)
continue;
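
The datagram.c hunk above lets ip6_datagram_send_ctl() hand SOL_SOCKET control messages to __sock_cmsg_send(), which fills a struct sockcm_cookie; the tsflags carried in that cookie are what the udp.c and ip6_output.c hunks feed into sock_tx_timestamp(). From user space this enables per-call timestamp requests; a hedged sketch, assuming the SO_TIMESTAMPING control-message support this plumbing was added for:

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* */

	/* request software TX timestamps for this sendmsg() call only;
	 * msg_name and msg_iov are assumed to be filled in by the caller.
	 */
	static ssize_t send_with_tx_tstamp(int fd, struct msghdr *msg)
	{
		char ctrl[CMSG_SPACE(sizeof(__u32))] = { 0 };
		__u32 tsflags = SOF_TIMESTAMPING_TX_SOFTWARE;
		struct cmsghdr *cm;

		msg->msg_control = ctrl;
		msg->msg_controllen = sizeof(ctrl);
		cm = CMSG_FIRSTHDR(msg);
		cm->cmsg_level = SOL_SOCKET;
		cm->cmsg_type = SO_TIMESTAMPING;
		cm->cmsg_len = CMSG_LEN(sizeof(tsflags));
		memcpy(CMSG_DATA(cm), &tsflags, sizeof(tsflags));

		return sendmsg(fd, msg, 0);
	}
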
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 0a37ddc7af51..6b573ebe49de 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
struct icmpv6_msg msg;
+ struct sockcm_cookie sockc_unused = {0};
int iif = 0;
int addr_type = 0;
int len;
@@ -527,7 +528,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit,
np->tclass, NULL, &fl6, (struct rt6_info *)dst,
- MSG_DONTWAIT, np->dontfrag);
+ MSG_DONTWAIT, np->dontfrag, &sockc_unused);
if (err) {
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
@@ -566,6 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
int hlimit;
u8 tclass;
u32 mark = IP6_REPLY_MARK(net, skb->mark);
+ struct sockcm_cookie sockc_unused = {0};
saddr = &ipv6_hdr(skb)->daddr;
@@ -617,7 +619,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl6,
(struct rt6_info *)dst, MSG_DONTWAIT,
- np->dontfrag);
+ np->dontfrag, &sockc_unused);
if (err) {
ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 295ca29a23c3..0b03533453e4 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -501,7 +501,8 @@ static int ila_nl_dump_start(struct netlink_callback *cb)
struct ila_net *ilan = net_generic(net, ila_net_id);
struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args;
- return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter);
+ return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter,
+ GFP_KERNEL);
}
static int ila_nl_dump_done(struct netlink_callback *cb)
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 70f2628be6fa..f1678388fb0d 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -69,7 +69,6 @@ struct sock *__inet6_lookup_established(struct net *net,
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
- rcu_read_lock();
begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash)
@@ -90,7 +89,6 @@ begin:
out:
sk = NULL;
found:
- rcu_read_unlock();
return sk;
}
EXPORT_SYMBOL(__inet6_lookup_established);
@@ -122,6 +120,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
return score;
}
+/* called with rcu_read_lock() */
struct sock *inet6_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -129,39 +128,27 @@ struct sock *inet6_lookup_listener(struct net *net,
const __be16 sport, const struct in6_addr *daddr,
const unsigned short hnum, const int dif)
{
- struct sock *sk;
- const struct hlist_nulls_node *node;
- struct sock *result;
- int score, hiscore, matches = 0, reuseport = 0;
- bool select_ok = true;
- u32 phash = 0;
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
+ int score, hiscore = 0, matches = 0, reuseport = 0;
+ struct sock *sk, *result = NULL;
+ u32 phash = 0;
- rcu_read_lock();
-begin:
- result = NULL;
- hiscore = 0;
- sk_nulls_for_each(sk, node, &ilb->head) {
+ sk_for_each(sk, &ilb->head) {
score = compute_score(sk, net, hnum, daddr, dif);
if (score > hiscore) {
- hiscore = score;
- result = sk;
reuseport = sk->sk_reuseport;
if (reuseport) {
phash = inet6_ehashfn(net, daddr, hnum,
saddr, sport);
- if (select_ok) {
- struct sock *sk2;
- sk2 = reuseport_select_sock(sk, phash,
- skb, doff);
- if (sk2) {
- result = sk2;
- goto found;
- }
- }
+ result = reuseport_select_sock(sk, phash,
+ skb, doff);
+ if (result)
+ return result;
matches = 1;
}
+ result = sk;
+ hiscore = score;
} else if (score == hiscore && reuseport) {
matches++;
if (reciprocal_scale(phash, matches) == 0)
@@ -169,25 +156,6 @@ begin:
phash = next_pseudo_random32(phash);
}
}
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
- goto begin;
- if (result) {
-found:
- if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
- result = NULL;
- else if (unlikely(compute_score(result, net, hnum, daddr,
- dif) < hiscore)) {
- sock_put(result);
- select_ok = false;
- goto begin;
- }
- }
- rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);
@@ -199,12 +167,12 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
const int dif)
{
struct sock *sk;
+ bool refcounted;
- local_bh_disable();
sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
- ntohs(dport), dif);
- local_bh_enable();
-
+ ntohs(dport), dif, &refcounted);
+ if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
return sk;
}
EXPORT_SYMBOL_GPL(inet6_lookup);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index dc2db4f7b182..35d3ddc328f8 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -372,6 +372,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
if (olen > 0) {
struct msghdr msg;
struct flowi6 flowi6;
+ struct sockcm_cookie sockc_junk;
int junk;
err = -ENOMEM;
@@ -390,7 +391,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
memset(&flowi6, 0, sizeof(flowi6));
err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
- &junk, &junk, &junk);
+ &junk, &junk, &junk, &sockc_junk);
if (err)
goto done;
err = -EINVAL;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 82e9f3076028..204af2219471 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -325,8 +325,6 @@ static int __init ipv6_offload_init(void)
if (tcpv6_offload_init() < 0)
pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
- if (udp_offload_init() < 0)
- pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
if (ipv6_exthdrs_offload_init() < 0)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
diff --git a/net/ipv6/ip6_offload.h b/net/ipv6/ip6_offload.h
index 2e155c651b35..96b40e41ac53 100644
--- a/net/ipv6/ip6_offload.h
+++ b/net/ipv6/ip6_offload.h
@@ -12,7 +12,8 @@
#define __ip6_offload_h
int ipv6_exthdrs_offload_init(void);
-int udp_offload_init(void);
+int udpv6_offload_init(void);
+int udpv6_offload_exit(void);
int tcpv6_offload_init(void);
#endif
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9428345d3a07..171518e3ca21 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1090,8 +1090,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
- int transhdrlen, int mtu, unsigned int flags,
- const struct flowi6 *fl6)
+ int exthdrlen, int transhdrlen, int mtu,
+ unsigned int flags, const struct flowi6 *fl6)
{
struct sk_buff *skb;
@@ -1116,7 +1116,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
- skb_reset_network_header(skb);
+ skb_set_network_header(skb, exthdrlen);
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
@@ -1258,7 +1258,8 @@ static int __ip6_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
- unsigned int flags, int dontfrag)
+ unsigned int flags, int dontfrag,
+ const struct sockcm_cookie *sockc)
{
struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
@@ -1329,7 +1330,7 @@ emsgsize:
csummode = CHECKSUM_PARTIAL;
if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
- sock_tx_timestamp(sk, &tx_flags);
+ sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
tskey = sk->sk_tskey++;
@@ -1358,7 +1359,7 @@ emsgsize:
(rt->dst.dev->features & NETIF_F_UFO) &&
(sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
- hh_len, fragheaderlen,
+ hh_len, fragheaderlen, exthdrlen,
transhdrlen, mtu, flags, fl6);
if (err)
goto error;
@@ -1565,7 +1566,8 @@ int ip6_append_data(struct sock *sk,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen, int hlimit,
int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags, int dontfrag)
+ struct rt6_info *rt, unsigned int flags, int dontfrag,
+ const struct sockcm_cookie *sockc)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1593,7 +1595,8 @@ int ip6_append_data(struct sock *sk,
return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
&np->cork, sk_page_frag(sk), getfrag,
- from, length, transhdrlen, flags, dontfrag);
+ from, length, transhdrlen, flags, dontfrag,
+ sockc);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
@@ -1752,7 +1755,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
int hlimit, int tclass,
struct ipv6_txoptions *opt, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
- int dontfrag)
+ int dontfrag, const struct sockcm_cookie *sockc)
{
struct inet_cork_full cork;
struct inet6_cork v6_cork;
@@ -1779,7 +1782,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
&current->task_frag, getfrag, from,
length + exthdrlen, transhdrlen + exthdrlen,
- flags, dontfrag);
+ flags, dontfrag, sockc);
if (err) {
__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
return ERR_PTR(err);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eb2ac4bb09ce..1f20345cbc97 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -252,12 +252,12 @@ static int ip6_tnl_create2(struct net_device *dev)
t = netdev_priv(dev);
+ dev->rtnl_link_ops = &ip6_link_ops;
err = register_netdevice(dev);
if (err < 0)
goto out;
strcpy(t->parms.name, dev->name);
- dev->rtnl_link_ops = &ip6_link_ops;
dev_hold(dev);
ip6_tnl_link(ip6n, t);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4449ad1f8114..4ff4b29894eb 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -407,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
break;
- opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ opt = rcu_dereference_protected(np->opt,
+ lockdep_sock_is_held(sk));
opt = ipv6_renew_options(sk, opt, optname,
(struct ipv6_opt_hdr __user *)optval,
optlen);
@@ -471,6 +472,7 @@ sticky_done:
struct ipv6_txoptions *opt = NULL;
struct msghdr msg;
struct flowi6 fl6;
+ struct sockcm_cookie sockc_junk;
int junk;
memset(&fl6, 0, sizeof(fl6));
@@ -503,7 +505,7 @@ sticky_done:
msg.msg_control = (void *)(opt+1);
retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
- &junk, &junk);
+ &junk, &junk, &sockc_junk);
if (retv)
goto done;
update:
@@ -1123,7 +1125,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct ipv6_txoptions *opt;
lock_sock(sk);
- opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ opt = rcu_dereference_protected(np->opt,
+ lockdep_sock_is_held(sk));
len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
release_sock(sk);
/* check if ipv6_getsockopt_sticky() returns err code */
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 84f9baf7aee8..86b67b70b626 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
+static inline bool unconditional(const struct ip6t_entry *e)
{
static const struct ip6t_ip6 uncond;
- return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct ip6t_entry) &&
+ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}
static inline const struct xt_entry_target *
@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
} else if (s == e) {
(*rulenum)++;
- if (s->target_offset == sizeof(struct ip6t_entry) &&
+ if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
- t->verdict < 0 &&
- unconditional(&s->ipv6)) {
+ t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -488,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct ip6t_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 &&
- unconditional(&e->ipv6)) || visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -581,14 +580,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
}
static int
-check_entry(const struct ip6t_entry *e, const char *name)
+check_entry(const struct ip6t_entry *e)
{
const struct xt_entry_target *t;
- if (!ip6_checkentry(&e->ipv6)) {
- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+ if (!ip6_checkentry(&e->ipv6))
return -EINVAL;
- }
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
@@ -679,10 +676,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -733,7 +726,7 @@ static bool check_underflow(const struct ip6t_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->ipv6))
+ if (!unconditional(e))
return false;
t = ip6t_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -753,9 +746,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
- (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -767,6 +762,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
return -EINVAL;
}
+ err = check_entry(e);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -775,9 +774,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1169,6 +1168,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, AF_INET6, get.name);
if (!IS_ERR_OR_NULL(t)) {
@@ -1505,7 +1505,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1518,7 +1519,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct ip6t_entry *)e, name);
+ ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
@@ -1944,6 +1945,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(AF_INET6);
t = xt_find_table_lock(net, AF_INET6, get.name);
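
The ip6_tables.c changes above harden ruleset parsing before any entry is trusted: unconditional() now also requires target_offset to equal sizeof(struct ip6t_entry), check_entry() is run from check_entry_size_and_hooks() for both native and compat layouts, each entry's next_offset is bounds-checked against the end of the user-supplied blob, and table names copied from user space are NUL-terminated before the lookup. The recurring bounds test, shown here as a stand-alone sketch for one entry e in a blob ending at limit:

	/* reject misaligned entries and entries whose declared size
	 * would run past the user-supplied ruleset blob
	 */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;
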
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index c382db7a2e73..da1cff79e447 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -62,6 +62,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct dst_entry *dst;
struct rt6_info *rt;
struct pingfakehdr pfh;
+ struct sockcm_cookie junk = {0};
pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
@@ -144,7 +145,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
err = ip6_append_data(sk, ping_getfrag, &pfh, len,
0, hlimit,
np->tclass, NULL, &fl6, rt,
- MSG_DONTWAIT, np->dontfrag);
+ MSG_DONTWAIT, np->dontfrag, &junk);
if (err) {
ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fa59dd7a427e..b07ce21983aa 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -745,6 +745,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct dst_entry *dst = NULL;
struct raw6_frag_vec rfv;
struct flowi6 fl6;
+ struct sockcm_cookie sockc;
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
@@ -821,13 +822,15 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
+ sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
- &hlimit, &tclass, &dontfrag);
+ &hlimit, &tclass, &dontfrag,
+ &sockc);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -897,7 +900,7 @@ back_from_confirm:
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
- msg->msg_flags, dontfrag);
+ msg->msg_flags, dontfrag, &sockc);
if (err)
ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 711d209f9124..0e621bc1ae11 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -234,7 +234,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
- opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
@@ -858,6 +858,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
return;
#ifdef CONFIG_TCP_MD5SIG
+ rcu_read_lock();
hash_location = tcp_parse_md5sig_option(th);
if (sk && sk_fullsock(sk)) {
key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
@@ -875,16 +876,15 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
th->source, &ipv6h->daddr,
ntohs(th->source), tcp_v6_iif(skb));
if (!sk1)
- return;
+ goto out;
- rcu_read_lock();
key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
if (!key)
- goto release_sk1;
+ goto out;
genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0)
- goto release_sk1;
+ goto out;
}
#endif
@@ -898,11 +898,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
#ifdef CONFIG_TCP_MD5SIG
-release_sk1:
- if (sk1) {
- rcu_read_unlock();
- sock_put(sk1);
- }
+out:
+ rcu_read_unlock();
#endif
}
@@ -967,7 +964,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
&tcp_request_sock_ipv6_ops, sk, skb);
drop:
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ tcp_listendrop(sk);
return 0; /* don't send reset */
}
@@ -1172,7 +1169,7 @@ out_overflow:
out_nonewsk:
dst_release(dst);
out:
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ tcp_listendrop(sk);
return NULL;
}
@@ -1351,6 +1348,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
const struct ipv6hdr *hdr;
+ bool refcounted;
struct sock *sk;
int ret;
struct net *net = dev_net(skb->dev);
@@ -1381,7 +1379,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
lookup:
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
- th->source, th->dest, inet6_iif(skb));
+ th->source, th->dest, inet6_iif(skb),
+ &refcounted);
if (!sk)
goto no_tcp_socket;
@@ -1404,6 +1403,7 @@ process:
goto lookup;
}
sock_hold(sk);
+ refcounted = true;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
@@ -1460,7 +1460,8 @@ process:
bh_unlock_sock(sk);
put_and_return:
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
return ret ? -1 : 0;
no_tcp_socket:
@@ -1483,7 +1484,9 @@ discard_it:
return 0;
discard_and_relse:
- sock_put(sk);
+ sk_drops_add(sk, skb);
+ if (refcounted)
+ sock_put(sk);
goto discard_it;
do_time_wait:
@@ -1514,6 +1517,7 @@ do_time_wait:
inet_twsk_deschedule_put(tw);
sk = sk2;
tcp_v6_restore_cb(skb);
+ refcounted = false;
goto process;
}
/* Fall through to ACK */
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fd25e447a5fa..a050b70b9101 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -213,37 +213,28 @@ static struct sock *udp6_lib_lookup2(struct net *net,
struct sk_buff *skb)
{
struct sock *sk, *result;
- struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
- bool select_ok = true;
u32 hash = 0;
-begin:
result = NULL;
badness = -1;
- udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
- result = sk;
- badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
- if (select_ok) {
- struct sock *sk2;
- sk2 = reuseport_select_sock(sk, hash, skb,
+ result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
- if (sk2) {
- result = sk2;
- select_ok = false;
- goto found;
- }
- }
+ if (result)
+ return result;
matches = 1;
}
+ result = sk;
+ badness = score;
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
@@ -251,27 +242,10 @@ begin:
hash = next_pseudo_random32(hash);
}
}
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != slot2)
- goto begin;
-
- if (result) {
-found:
- if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
- result = NULL;
- else if (unlikely(compute_score2(result, net, saddr, sport,
- daddr, hnum, dif) < badness)) {
- sock_put(result);
- goto begin;
- }
- }
return result;
}
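[Note: when several reuseport sockets tie on score, the lookup above picks one by feeding a flow hash through reciprocal_scale(), which maps a 32-bit value into [0, n) without a division so a given flow keeps hitting the same socket. A standalone sketch of that mapping, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Same formula as the kernel's reciprocal_scale(): val * n / 2^32. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t flow_hash = 0x9e3779b9;	/* pretend udp6_ehashfn() output */
	uint32_t matches = 4;			/* equal-score reuseport sockets */

	printf("flow hash %#x -> pick socket %u of %u\n",
	       flow_hash, reciprocal_scale(flow_hash, matches), matches);
	return 0;
}
]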
+/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
@@ -279,15 +253,12 @@ struct sock *__udp6_lib_lookup(struct net *net,
struct sk_buff *skb)
{
struct sock *sk, *result;
- struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
- bool select_ok = true;
u32 hash = 0;
- rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp6_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
@@ -309,34 +280,26 @@ struct sock *__udp6_lib_lookup(struct net *net,
&in6addr_any, hnum, dif,
hslot2, slot2, skb);
}
- rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = -1;
- sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+ sk_for_each_rcu(sk, &hslot->head) {
score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
if (score > badness) {
- result = sk;
- badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
- if (select_ok) {
- struct sock *sk2;
-
- sk2 = reuseport_select_sock(sk, hash, skb,
+ result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
- if (sk2) {
- result = sk2;
- select_ok = false;
- goto found;
- }
- }
+ if (result)
+ return result;
matches = 1;
}
+ result = sk;
+ badness = score;
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
@@ -344,25 +307,6 @@ begin:
hash = next_pseudo_random32(hash);
}
}
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != slot)
- goto begin;
-
- if (result) {
-found:
- if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
- result = NULL;
- else if (unlikely(compute_score(result, net, hnum, saddr, sport,
- daddr, dport, dif) < badness)) {
- sock_put(result);
- goto begin;
- }
- }
- rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
@@ -382,12 +326,37 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
udptable, skb);
}
+struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct net_device *dev =
+ skb_dst(skb) ? skb_dst(skb)->dev : skb->dev;
+
+ return __udp6_lib_lookup(dev_net(dev), &iph->saddr, sport,
+ &iph->daddr, dport, inet6_iif(skb),
+ &udp_table, skb);
+}
+EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
+
+/* Must be called under rcu_read_lock().
+ * Does increment socket refcount.
+ */
+#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
+ IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
- return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table, NULL);
+ struct sock *sk;
+
+ sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ dif, &udp_table, NULL);
+ if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+ return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
+#endif
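[Note: because the lookup itself no longer takes a reference, the udp6_lib_lookup() wrapper above only bumps the refcount if it is still non-zero, returning NULL for a socket that is already being freed. A userspace sketch of that "increment unless zero" step, using C11 atomics as a stand-in:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* already dying: caller sees NULL */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live socket:  %s\n", inc_not_zero(&live) ? "got ref" : "NULL");
	printf("dying socket: %s\n", inc_not_zero(&dying) ? "got ref" : "NULL");
	return 0;
}
]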
/*
* This should be easy, if there is something there we
@@ -401,7 +370,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
- int peeked, off = 0;
+ int peeked, peeking, off;
int err;
int is_udplite = IS_UDPLITE(sk);
bool checksum_valid = false;
@@ -415,15 +384,16 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
try_again:
+ peeking = off = sk_peek_offset(sk, flags);
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
- goto out;
+ return err;
- ulen = skb->len - sizeof(struct udphdr);
+ ulen = skb->len;
copied = len;
- if (copied > ulen)
- copied = ulen;
+ if (copied > ulen - off)
+ copied = ulen - off;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
@@ -435,17 +405,16 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) {
checksum_valid = !udp_lib_checksum_complete(skb);
if (!checksum_valid)
goto csum_copy_err;
}
if (checksum_valid || skb_csum_unnecessary(skb))
- err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
- msg, copied);
+ err = skb_copy_datagram_msg(skb, off, msg, copied);
else {
- err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
+ err = skb_copy_and_csum_datagram_msg(skb, off, msg);
if (err == -EINVAL)
goto csum_copy_err;
}
@@ -462,7 +431,8 @@ try_again:
UDP_MIB_INERRORS,
is_udplite);
}
- goto out_free;
+ skb_free_datagram_locked(sk, skb);
+ return err;
}
if (!peeked) {
if (is_udp4)
@@ -510,9 +480,7 @@ try_again:
if (flags & MSG_TRUNC)
err = ulen;
-out_free:
- skb_free_datagram_locked(sk, skb);
-out:
+ __skb_free_datagram_locked(sk, skb, peeking ? -err : err);
return err;
csum_copy_err:
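[Note: with the UDP header handling moved to queue time elsewhere in this patch, udpv6_recvmsg() above derives the copy window purely from the datagram length and the peek offset. A toy illustration of that arithmetic, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int ulen = 100;	/* datagram payload length (skb->len) */
	int off = 40;			/* SO_PEEK_OFF: bytes already peeked */
	unsigned int len = 512;		/* user buffer size passed to recvmsg() */
	unsigned int copied = len;
	int trunc = 0;

	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		trunc = 1;

	printf("copy %u bytes starting at offset %d%s\n",
	       copied, off, trunc ? " (MSG_TRUNC)" : "");
	return 0;
}
]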
@@ -585,7 +553,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk->sk_err = err;
sk->sk_error_report(sk);
out:
- sock_put(sk);
+ return;
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -598,7 +566,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk_incoming_cpu_update(sk);
}
- rc = sock_queue_rcv_skb(sk, skb);
+ rc = __sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
@@ -692,8 +660,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto csum_error;
+ if (sk_filter(sk, skb))
+ goto drop;
}
+ udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
@@ -747,33 +718,6 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
return true;
}
-static void flush_stack(struct sock **stack, unsigned int count,
- struct sk_buff *skb, unsigned int final)
-{
- struct sk_buff *skb1 = NULL;
- struct sock *sk;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- sk = stack[i];
- if (likely(!skb1))
- skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
- if (!skb1) {
- atomic_inc(&sk->sk_drops);
- UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
- IS_UDPLITE(sk));
- UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
- IS_UDPLITE(sk));
- }
-
- if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
- skb1 = NULL;
- sock_put(sk);
- }
- if (unlikely(skb1))
- kfree_skb(skb1);
-}
-
static void udp6_csum_zero_error(struct sk_buff *skb)
{
/* RFC 2460 section 8.1 says that we SHOULD log
@@ -792,15 +736,15 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr,
struct udp_table *udptable, int proto)
{
- struct sock *sk, *stack[256 / sizeof(struct sock *)];
+ struct sock *sk, *first = NULL;
const struct udphdr *uh = udp_hdr(skb);
- struct hlist_nulls_node *node;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
- int dif = inet6_iif(skb);
- unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
+ unsigned int offset = offsetof(typeof(*sk), sk_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
- bool inner_flushed = false;
+ int dif = inet6_iif(skb);
+ struct hlist_node *node;
+ struct sk_buff *nskb;
if (use_hash2) {
hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
@@ -811,27 +755,32 @@ start_lookup:
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
- spin_lock(&hslot->lock);
- sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
- if (__udp_v6_is_mcast_sock(net, sk,
- uh->dest, daddr,
- uh->source, saddr,
- dif, hnum) &&
- /* If zero checksum and no_check is not on for
- * the socket then skip it.
- */
- (uh->check || udp_sk(sk)->no_check6_rx)) {
- if (unlikely(count == ARRAY_SIZE(stack))) {
- flush_stack(stack, count, skb, ~0);
- inner_flushed = true;
- count = 0;
- }
- stack[count++] = sk;
- sock_hold(sk);
+ sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+ if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
+ uh->source, saddr, dif, hnum))
+ continue;
+ /* If zero checksum and no_check is not on for
+ * the socket then skip it.
+ */
+ if (!uh->check && !udp_sk(sk)->no_check6_rx)
+ continue;
+ if (!first) {
+ first = sk;
+ continue;
+ }
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ atomic_inc(&sk->sk_drops);
+ UDP6_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
+ IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+ continue;
}
- }
- spin_unlock(&hslot->lock);
+ if (udpv6_queue_rcv_skb(sk, nskb) > 0)
+ consume_skb(nskb);
+ }
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
@@ -839,13 +788,13 @@ start_lookup:
goto start_lookup;
}
- if (count) {
- flush_stack(stack, count, skb, count - 1);
+ if (first) {
+ if (udpv6_queue_rcv_skb(first, skb) > 0)
+ consume_skb(skb);
} else {
- if (!inner_flushed)
- UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
- proto == IPPROTO_UDPLITE);
- consume_skb(skb);
+ kfree_skb(skb);
+ UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+ proto == IPPROTO_UDPLITE);
}
return 0;
}
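[Note: the multicast path above replaces the old on-stack socket array with a clone-per-receiver scheme: the first match is remembered, later matches get an skb_clone(), and the original skb is finally consumed by the first socket or freed if nobody matched. A simplified userspace sketch of that delivery strategy (clone_buf() and the receiver list are stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[32]; };

static struct buf *clone_buf(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, b, sizeof(*c));
	return c;
}

static void deliver(int receiver, struct buf *b)
{
	printf("receiver %d got \"%s\"\n", receiver, b->data);
	free(b);			/* queueing / consume_skb() stand-in */
}

int main(void)
{
	struct buf *skb = malloc(sizeof(*skb));
	int receivers[] = { 3, 7, 9 };	/* matching multicast sockets */
	int first = -1;

	strcpy(skb->data, "multicast datagram");
	for (unsigned int i = 0; i < sizeof(receivers) / sizeof(receivers[0]); i++) {
		if (first < 0) {
			first = receivers[i];	/* original is delivered last */
			continue;
		}
		struct buf *nskb = clone_buf(skb);

		if (nskb)
			deliver(receivers[i], nskb);
		/* on clone failure the real code only bumps drop counters */
	}
	if (first >= 0)
		deliver(first, skb);	/* hand the original to the first match */
	else
		free(skb);		/* nobody listening: drop */
	return 0;
}
]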
@@ -853,10 +802,10 @@ start_lookup:
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
+ const struct in6_addr *saddr, *daddr;
struct net *net = dev_net(skb->dev);
- struct sock *sk;
struct udphdr *uh;
- const struct in6_addr *saddr, *daddr;
+ struct sock *sk;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -910,7 +859,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int ret;
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
- sock_put(sk);
udp6_csum_zero_error(skb);
goto csum_error;
}
@@ -920,7 +868,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
ip6_compute_pseudo);
ret = udpv6_queue_rcv_skb(sk, skb);
- sock_put(sk);
/* a return value > 0 means to resubmit the input */
if (ret > 0)
@@ -1128,6 +1075,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int connected = 0;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ struct sockcm_cookie sockc;
/* destination address check */
if (sin6) {
@@ -1247,6 +1195,7 @@ do_udp_sendmsg:
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
+ sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
opt = &opt_space;
@@ -1254,7 +1203,8 @@ do_udp_sendmsg:
opt->tot_len = sizeof(*opt);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
- &hlimit, &tclass, &dontfrag);
+ &hlimit, &tclass, &dontfrag,
+ &sockc);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1321,7 +1271,7 @@ back_from_confirm:
skb = ip6_make_skb(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt,
&fl6, (struct rt6_info *)dst,
- msg->msg_flags, dontfrag);
+ msg->msg_flags, dontfrag, &sockc);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_v6_send_skb(skb, &fl6);
@@ -1348,7 +1298,8 @@ do_append_data:
err = ip6_append_data(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info *)dst,
- corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
+ corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag,
+ &sockc);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
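[Note: the sockcm_cookie added above carries per-call transmit settings: it starts from the socket-wide sk_tsflags and may be overridden while control messages are parsed, then travels down into ip6_make_skb()/ip6_append_data(). A very small sketch of that idea; the struct and helper names are illustrative only:

#include <stdio.h>

struct sockcm_cookie_like { unsigned int tsflags; };

/* cmsg parsing may override the per-call settings. */
static void parse_cmsg(struct sockcm_cookie_like *c, int has_ts_cmsg,
		       unsigned int cmsg_tsflags)
{
	if (has_ts_cmsg)
		c->tsflags = cmsg_tsflags;
}

int main(void)
{
	unsigned int sk_tsflags = 0x1;	/* socket-wide default */
	struct sockcm_cookie_like c = { .tsflags = sk_tsflags };

	parse_cmsg(&c, 1, 0x5);
	printf("tsflags used for this send: %#x\n", c.tsflags);
	return 0;
}
]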
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 2b0fbe6929e8..5429f6bcf047 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -153,7 +153,7 @@ static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 1;
- return udp_gro_receive(head, skb, uh);
+ return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb);
flush:
NAPI_GRO_CB(skb)->flush = 1;
@@ -173,7 +173,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
}
- return udp_gro_complete(skb, nhoff);
+ return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
static const struct net_offload udpv6_offload = {
@@ -184,7 +184,12 @@ static const struct net_offload udpv6_offload = {
},
};
-int __init udp_offload_init(void)
+int udpv6_offload_init(void)
{
return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
}
+
+int udpv6_offload_exit(void)
+{
+ return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
+}
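[Note: the GRO hooks above now pass a protocol-specific socket lookup function into the shared UDP GRO code instead of having it hard-coded. A minimal sketch of that callback style; types and names below are illustrative, not the kernel's:

#include <stdio.h>

struct pkt { int v6; };
typedef int (*lookup_fn)(const struct pkt *p);

static int lookup_v4(const struct pkt *p) { (void)p; return 4; }
static int lookup_v6(const struct pkt *p) { (void)p; return 6; }

/* Shared aggregation code resolves the socket via the caller's helper. */
static void gro_receive(const struct pkt *p, lookup_fn lookup)
{
	printf("resolved a socket for IPv%d traffic\n", lookup(p));
}

int main(void)
{
	struct pkt p4 = { .v6 = 0 }, p6 = { .v6 = 1 };

	gro_receive(&p4, lookup_v4);	/* udp4 path passes its own helper */
	gro_receive(&p6, lookup_v6);	/* udp6 path passes udp6_lib_lookup_skb */
	return 0;
}
]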
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ec22078b0914..42de4ccd159f 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
int length;
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
if (!pskb_may_pull(skb, 4))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
if (!pskb_may_pull(skb, length))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+ ptr += 4;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
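[Note: the reordering above matters because pskb_may_pull() may reallocate the skb header, invalidating any pointer cached before the call; the fix re-reads skb->data after each pull. A userspace analogy using realloc(), purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct skb { char *data; size_t linear; size_t total; };

/* Make at least 'len' bytes linear; may move skb->data, like pskb_may_pull(). */
static int may_pull(struct skb *skb, size_t len)
{
	if (len > skb->total)
		return 0;
	if (len > skb->linear) {
		char *n = realloc(skb->data, len);

		if (!n)
			return 0;
		memset(n + skb->linear, 0, len - skb->linear);
		skb->data = n;
		skb->linear = len;
	}
	return 1;
}

int main(void)
{
	struct skb skb = { .data = malloc(4), .linear = 4, .total = 64 };
	char *ptr;

	memcpy(skb.data, "L2TP", 4);
	if (!may_pull(&skb, 4))
		return 1;
	ptr = skb.data;		/* cache the pointer only after the pull */

	if (!may_pull(&skb, 32))
		return 1;
	ptr = skb.data;		/* re-read: the previous pointer may be stale */

	printf("header at %p: %.4s\n", (void *)ptr, ptr);
	free(skb.data);
	return 0;
}
]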
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 6b54ff3ff4cb..46e07267e503 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -136,12 +136,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
int length;
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
if (!pskb_may_pull(skb, 4))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
@@ -169,6 +168,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
if (!pskb_may_pull(skb, length))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+ ptr += 4;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
@@ -492,6 +494,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
+ struct sockcm_cookie sockc_unused = {0};
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
@@ -562,9 +565,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
- err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
- &hlimit, &tclass, &dontfrag);
- if (err < 0) {
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+ &hlimit, &tclass, &dontfrag,
+ &sockc_unused);
+ if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
@@ -625,7 +629,7 @@ back_from_confirm:
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, hlimit, tclass, opt,
&fl6, (struct rt6_info *)dst,
- msg->msg_flags, dontfrag);
+ msg->msg_flags, dontfrag, &sockc_unused);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 4932e9f243a2..42fa81031dfa 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -935,6 +935,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
size_t len)
{
struct tid_ampdu_tx *tid_tx;
+ struct ieee80211_txq *txq;
u16 capab, tid;
u8 buf_size;
bool amsdu;
@@ -945,6 +946,10 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
+ txq = sta->sta.txq[tid];
+ if (!amsdu && txq)
+ set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);
+
mutex_lock(&sta->ampdu_mlme.mtx);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fe1704c4e8fb..fc4730b938d0 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -65,11 +65,13 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
return ret;
if (type == NL80211_IFTYPE_AP_VLAN &&
- params && params->use_4addr == 0)
+ params && params->use_4addr == 0) {
RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
- else if (type == NL80211_IFTYPE_STATION &&
- params && params->use_4addr >= 0)
+ ieee80211_check_fast_rx_iface(sdata);
+ } else if (type == NL80211_IFTYPE_STATION &&
+ params && params->use_4addr >= 0) {
sdata->u.mgd.use_4addr = params->use_4addr;
+ }
if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) {
struct ieee80211_local *local = sdata->local;
@@ -732,6 +734,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
sdata->vif.bss_conf.dtim_period = params->dtim_period;
sdata->vif.bss_conf.enable_beacon = true;
+ sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p;
sdata->vif.bss_conf.ssid_len = params->ssid_len;
if (params->ssid_len)
@@ -1202,6 +1205,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
params->opmode_notif, band);
}
+ if (params->support_p2p_ps >= 0)
+ sta->sta.support_p2p_ps = params->support_p2p_ps;
+
if (ieee80211_vif_is_mesh(&sdata->vif))
sta_apply_mesh_params(local, sta, params);
@@ -1363,6 +1369,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
new_4addr = true;
+ __ieee80211_check_fast_rx_iface(vlansdata);
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
@@ -1499,7 +1506,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
memset(pinfo, 0, sizeof(*pinfo));
- pinfo->generation = mesh_paths_generation;
+ pinfo->generation = mpath->sdata->u.mesh.mesh_paths_generation;
pinfo->filled = MPATH_INFO_FRAME_QLEN |
MPATH_INFO_SN |
@@ -1577,7 +1584,7 @@ static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp,
memset(pinfo, 0, sizeof(*pinfo));
memcpy(mpp, mpath->mpp, ETH_ALEN);
- pinfo->generation = mpp_paths_generation;
+ pinfo->generation = mpath->sdata->u.mesh.mpp_paths_generation;
}
static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev,
@@ -1885,6 +1892,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
else
sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+ ieee80211_check_fast_rx_iface(sdata);
}
if (params->ht_opmode >= 0) {
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 283981108ca8..74142d07ad31 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -343,8 +343,10 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
const struct cfg80211_chan_def *chandef)
{
- if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
+ if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+ ieee80211_recalc_chanctx_min_def(local, ctx);
return;
+ }
WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 4ab5c522ceee..b251b2f7f8dd 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -127,6 +127,9 @@ static const char *hw_flag_names[] = {
FLAG(BEACON_TX_STATUS),
FLAG(NEEDS_UNIQUE_STA_ADDR),
FLAG(SUPPORTS_REORDERING_BUFFER),
+ FLAG(USES_RSS),
+ FLAG(TX_AMSDU),
+ FLAG(TX_FRAG_LIST),
#undef FLAG
};
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a39512f09f9e..33dfcbc2bf9c 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -3,6 +3,7 @@
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -51,31 +52,54 @@ static const struct file_operations sta_ ##name## _ops = { \
STA_FILE(aid, sta.aid, D);
+static const char * const sta_flag_names[] = {
+#define FLAG(F) [WLAN_STA_##F] = #F
+ FLAG(AUTH),
+ FLAG(ASSOC),
+ FLAG(PS_STA),
+ FLAG(AUTHORIZED),
+ FLAG(SHORT_PREAMBLE),
+ FLAG(WDS),
+ FLAG(CLEAR_PS_FILT),
+ FLAG(MFP),
+ FLAG(BLOCK_BA),
+ FLAG(PS_DRIVER),
+ FLAG(PSPOLL),
+ FLAG(TDLS_PEER),
+ FLAG(TDLS_PEER_AUTH),
+ FLAG(TDLS_INITIATOR),
+ FLAG(TDLS_CHAN_SWITCH),
+ FLAG(TDLS_OFF_CHANNEL),
+ FLAG(TDLS_WIDER_BW),
+ FLAG(UAPSD),
+ FLAG(SP),
+ FLAG(4ADDR_EVENT),
+ FLAG(INSERTED),
+ FLAG(RATE_CONTROL),
+ FLAG(TOFFSET_KNOWN),
+ FLAG(MPSP_OWNER),
+ FLAG(MPSP_RECIPIENT),
+ FLAG(PS_DELIVER),
+#undef FLAG
+};
+
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[121];
+ char buf[16 * NUM_WLAN_STA_FLAGS], *pos = buf;
+ char *end = buf + sizeof(buf) - 1;
struct sta_info *sta = file->private_data;
+ unsigned int flg;
+
+ BUILD_BUG_ON(ARRAY_SIZE(sta_flag_names) != NUM_WLAN_STA_FLAGS);
+
+ for (flg = 0; flg < NUM_WLAN_STA_FLAGS; flg++) {
+ if (test_sta_flag(sta, flg))
+ pos += scnprintf(pos, end - pos, "%s\n",
+ sta_flag_names[flg]);
+ }
-#define TEST(flg) \
- test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
-
- int res = scnprintf(buf, sizeof(buf),
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
- TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
- TEST(PS_DRIVER), TEST(AUTHORIZED),
- TEST(SHORT_PREAMBLE),
- sta->sta.wme ? "WME\n" : "",
- TEST(WDS), TEST(CLEAR_PS_FILT),
- TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
- TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
- TEST(TDLS_PEER_AUTH), TEST(TDLS_INITIATOR),
- TEST(TDLS_CHAN_SWITCH), TEST(TDLS_OFF_CHANNEL),
- TEST(4ADDR_EVENT), TEST(INSERTED),
- TEST(RATE_CONTROL), TEST(TOFFSET_KNOWN),
- TEST(MPSP_OWNER), TEST(MPSP_RECIPIENT));
-#undef TEST
- return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
}
STA_OPS(flags);
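[Note: the rewritten flag dump above is table-driven: an array of names indexed by the flag enum, a compile-time check that the table matches the enum, and a bounded print loop. A self-contained userspace sketch with made-up flags (the kernel uses scnprintf(), which also caps the advance on truncation):

#include <assert.h>
#include <stdio.h>

enum sta_flag { F_AUTH, F_ASSOC, F_AUTHORIZED, NUM_FLAGS };

static const char * const flag_names[] = {
	[F_AUTH] = "AUTH",
	[F_ASSOC] = "ASSOC",
	[F_AUTHORIZED] = "AUTHORIZED",
};

static_assert(sizeof(flag_names) / sizeof(flag_names[0]) == NUM_FLAGS,
	      "name table out of sync with the enum");

int main(void)
{
	unsigned long flags = (1UL << F_AUTH) | (1UL << F_AUTHORIZED);
	char buf[16 * NUM_FLAGS], *pos = buf;
	char *end = buf + sizeof(buf) - 1;

	*pos = '\0';
	for (unsigned int flg = 0; flg < NUM_FLAGS; flg++)
		if (flags & (1UL << flg))
			pos += snprintf(pos, end - pos, "%s\n", flag_names[flg]);

	fputs(buf, stdout);
	return 0;
}
]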
@@ -151,11 +175,12 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
- char _buf[12] = {}, *buf = _buf;
+ char _buf[25] = {}, *buf = _buf;
struct sta_info *sta = file->private_data;
bool start, tx;
unsigned long tid;
- int ret;
+ char *pos;
+ int ret, timeout = 5000;
if (count > sizeof(_buf))
return -EINVAL;
@@ -164,37 +189,48 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
return -EFAULT;
buf[sizeof(_buf) - 1] = '\0';
+ pos = buf;
+ buf = strsep(&pos, " ");
+ if (!buf)
+ return -EINVAL;
- if (strncmp(buf, "tx ", 3) == 0) {
- buf += 3;
+ if (!strcmp(buf, "tx"))
tx = true;
- } else if (strncmp(buf, "rx ", 3) == 0) {
- buf += 3;
+ else if (!strcmp(buf, "rx"))
tx = false;
- } else
+ else
return -EINVAL;
- if (strncmp(buf, "start ", 6) == 0) {
- buf += 6;
+ buf = strsep(&pos, " ");
+ if (!buf)
+ return -EINVAL;
+ if (!strcmp(buf, "start")) {
start = true;
if (!tx)
return -EINVAL;
- } else if (strncmp(buf, "stop ", 5) == 0) {
- buf += 5;
+ } else if (!strcmp(buf, "stop")) {
start = false;
- } else
+ } else {
return -EINVAL;
+ }
- ret = kstrtoul(buf, 0, &tid);
- if (ret)
- return ret;
+ buf = strsep(&pos, " ");
+ if (!buf)
+ return -EINVAL;
+ if (sscanf(buf, "timeout=%d", &timeout) == 1) {
+ buf = strsep(&pos, " ");
+ if (!buf || !tx || !start)
+ return -EINVAL;
+ }
- if (tid >= IEEE80211_NUM_TIDS)
+ ret = kstrtoul(buf, 0, &tid);
+ if (ret || tid >= IEEE80211_NUM_TIDS)
return -EINVAL;
if (tx) {
if (start)
- ret = ieee80211_start_tx_ba_session(&sta->sta, tid, 5000);
+ ret = ieee80211_start_tx_ba_session(&sta->sta, tid,
+ timeout);
else
ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
} else {
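[Note: the new write handler above tokenises its input with strsep() and accepts an optional "timeout=N" token between the start/stop keyword and the TID. A standalone sketch of the same parsing flow in plain C, with error handling reduced to returning -1:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse(char *buf, bool *tx, bool *start, int *timeout,
		 unsigned long *tid)
{
	char *pos = buf, *tok;

	*timeout = 5000;			/* default kept from the old code */

	tok = strsep(&pos, " ");
	if (!tok)
		return -1;
	if (!strcmp(tok, "tx"))
		*tx = true;
	else if (!strcmp(tok, "rx"))
		*tx = false;
	else
		return -1;

	tok = strsep(&pos, " ");
	if (!tok)
		return -1;
	if (!strcmp(tok, "start"))
		*start = true;
	else if (!strcmp(tok, "stop"))
		*start = false;
	else
		return -1;

	tok = strsep(&pos, " ");
	if (!tok)
		return -1;
	if (sscanf(tok, "timeout=%d", timeout) == 1) {
		tok = strsep(&pos, " ");	/* timeout only valid for "tx start" */
		if (!tok || !*tx || !*start)
			return -1;
	}

	*tid = strtoul(tok, NULL, 0);
	return 0;
}

int main(void)
{
	char line[] = "tx start timeout=2500 3";
	bool tx, start;
	int timeout;
	unsigned long tid;

	if (parse(line, &tx, &start, &timeout, &tid))
		return 1;
	printf("%s %s tid=%lu timeout=%d\n", tx ? "tx" : "rx",
	       start ? "start" : "stop", tid, timeout);
	return 0;
}
]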
@@ -322,14 +358,14 @@ STA_OPS(vht_capa);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
- sta->debugfs.dir, sta, &sta_ ##name## _ops);
+ sta->debugfs_dir, sta, &sta_ ##name## _ops);
#define DEBUGFS_ADD_COUNTER(name, field) \
if (sizeof(sta->field) == sizeof(u32)) \
- debugfs_create_u32(#name, 0400, sta->debugfs.dir, \
+ debugfs_create_u32(#name, 0400, sta->debugfs_dir, \
(u32 *) &sta->field); \
else \
- debugfs_create_u64(#name, 0400, sta->debugfs.dir, \
+ debugfs_create_u64(#name, 0400, sta->debugfs_dir, \
(u64 *) &sta->field);
void ieee80211_sta_debugfs_add(struct sta_info *sta)
@@ -339,8 +375,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
u8 mac[3*ETH_ALEN];
- sta->debugfs.add_has_run = true;
-
if (!stations_dir)
return;
@@ -355,8 +389,8 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
* destroyed quickly enough the old station's debugfs
* dir might still be around.
*/
- sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
- if (!sta->debugfs.dir)
+ sta->debugfs_dir = debugfs_create_dir(mac, stations_dir);
+ if (!sta->debugfs_dir)
return;
DEBUGFS_ADD(flags);
@@ -372,14 +406,14 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
debugfs_create_x32("driver_buffered_tids", 0400,
- sta->debugfs.dir,
+ sta->debugfs_dir,
(u32 *)&sta->driver_buffered_tids);
else
debugfs_create_x64("driver_buffered_tids", 0400,
- sta->debugfs.dir,
+ sta->debugfs_dir,
(u64 *)&sta->driver_buffered_tids);
- drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
+ drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
}
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
@@ -387,7 +421,7 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta)
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
- debugfs_remove_recursive(sta->debugfs.dir);
- sta->debugfs.dir = NULL;
+ drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
+ debugfs_remove_recursive(sta->debugfs_dir);
+ sta->debugfs_dir = NULL;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 18b0d65baff0..184473c257eb 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1,3 +1,8 @@
+/*
+* Portions of this file
+* Copyright(c) 2016 Intel Deutschland GmbH
+*/
+
#ifndef __MAC80211_DRIVER_OPS
#define __MAC80211_DRIVER_OPS
@@ -29,6 +34,16 @@ static inline void drv_tx(struct ieee80211_local *local,
local->ops->tx(&local->hw, control, skb);
}
+static inline void drv_sync_rx_queues(struct ieee80211_local *local,
+ struct sta_info *sta)
+{
+ if (local->ops->sync_rx_queues) {
+ trace_drv_sync_rx_queues(local, sta->sdata, &sta->sta);
+ local->ops->sync_rx_queues(&local->hw);
+ trace_drv_return_void(local);
+ }
+}
+
static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
u32 sset, u8 *data)
{
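[Note: drv_sync_rx_queues() above follows the usual optional-driver-op wrapper shape: call the hook only if the driver provided it, bracketed by trace points. A small sketch of that shape, with printf standing in for tracing:

#include <stdio.h>

struct ops { void (*sync_rx_queues)(void); };

static void drv_sync_rx_queues(const struct ops *ops)
{
	if (ops->sync_rx_queues) {
		printf("trace: drv_sync_rx_queues\n");
		ops->sync_rx_queues();
		printf("trace: return void\n");
	}
}

static void hw_sync(void)
{
	printf("driver flushes its RX queues\n");
}

int main(void)
{
	struct ops with = { .sync_rx_queues = hw_sync };
	struct ops without = { 0 };

	drv_sync_rx_queues(&with);
	drv_sync_rx_queues(&without);	/* hook absent: silently skipped */
	return 0;
}
]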
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index fc3238376b39..c6d4b75eb60b 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -649,8 +649,6 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
return NULL;
}
- sta->rx_stats.last_rx = jiffies;
-
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
@@ -670,10 +668,11 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ unsigned long last_active = ieee80211_sta_last_active(sta);
+
if (sta->sdata == sdata &&
- time_after(sta->rx_stats.last_rx +
- IEEE80211_IBSS_MERGE_INTERVAL,
- jiffies)) {
+ time_is_after_jiffies(last_active +
+ IEEE80211_IBSS_MERGE_INTERVAL)) {
active++;
break;
}
@@ -1236,8 +1235,6 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
if (!sta)
return;
- sta->rx_stats.last_rx = jiffies;
-
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
@@ -1259,11 +1256,13 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
mutex_lock(&local->sta_mtx);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+ unsigned long last_active = ieee80211_sta_last_active(sta);
+
if (sdata != sta->sdata)
continue;
- if (time_after(jiffies, sta->rx_stats.last_rx + exp_time) ||
- (time_after(jiffies, sta->rx_stats.last_rx + exp_rsn) &&
+ if (time_is_before_jiffies(last_active + exp_time) ||
+ (time_is_before_jiffies(last_active + exp_rsn) &&
sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
sta->sta_state != IEEE80211_STA_AUTHORIZED ?
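[Note: the ibss.c hunks switch the expiry checks to time_is_after_jiffies()/time_is_before_jiffies(), which compare against the current jiffies in a wrap-safe way by looking at the signed difference. A 32-bit sketch of that idea:

#include <stdint.h>
#include <stdio.h>

/* true if a is later than b, even across a counter wrap */
static int time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t jiffies = 0xfffffff0u;		/* counter about to wrap */
	uint32_t last_active = jiffies - 100;
	uint32_t deadline = last_active + 200;	/* wraps past zero */

	printf("still active? %s\n",
	       time_after32(deadline, jiffies) ? "yes" : "no");
	return 0;
}
]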
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 804575ff7af5..8857b01b82d0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -696,6 +696,11 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */
int meshconf_offset;
+
+ struct mesh_table *mesh_paths;
+ struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+ int mesh_paths_generation;
+ int mpp_paths_generation;
};
#ifdef CONFIG_MAC80211_MESH
@@ -797,6 +802,7 @@ struct mac80211_qos_map {
enum txq_info_flags {
IEEE80211_TXQ_STOP,
IEEE80211_TXQ_AMPDU,
+ IEEE80211_TXQ_NO_AMSDU,
};
struct txq_info {
@@ -1489,6 +1495,11 @@ u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
u64 *cookie, gfp_t gfp);
+void ieee80211_check_fast_rx(struct sta_info *sta);
+void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata);
+void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata);
+void ieee80211_clear_fast_rx(struct sta_info *sta);
+
/* STA code */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -1719,6 +1730,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
+void ieee80211_sta_set_rx_nss(struct sta_info *sta);
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt);
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 453b4e741780..097ece8b5c02 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1093,7 +1093,7 @@ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
sdata->fragment_next = 0;
if (ieee80211_vif_is_mesh(&sdata->vif))
- mesh_rmc_free(sdata);
+ ieee80211_mesh_teardown_sdata(sdata);
}
static void ieee80211_uninit(struct net_device *dev)
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 3df7b0392d30..edd6f2945f69 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -338,6 +338,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
} else {
rcu_assign_pointer(sta->gtk[idx], new);
}
+ ieee80211_check_fast_rx(sta);
} else {
defunikey = old &&
old == key_mtx_dereference(sdata->local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 8190bf27ebff..33c80de61eca 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -558,6 +558,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
if (!ops->set_key)
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
+
wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
local = wiphy_priv(wiphy);
@@ -854,7 +856,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* Only HW csum features are currently compatible with mac80211 */
feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_GSO_SOFTWARE;
+ NETIF_F_GSO_SOFTWARE | NETIF_F_RXCSUM;
if (WARN_ON(hw->netdev_features & ~feature_whitelist))
return -EINVAL;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d32cefcb63b0..dcc1facc807c 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -25,7 +25,6 @@ bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
void ieee80211s_init(void)
{
- mesh_pathtbl_init();
mesh_allocated = 1;
rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry),
0, 0, NULL);
@@ -35,7 +34,6 @@ void ieee80211s_stop(void)
{
if (!mesh_allocated)
return;
- mesh_pathtbl_unregister();
kmem_cache_destroy(rm_cache);
}
@@ -176,22 +174,23 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
return -ENOMEM;
sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
for (i = 0; i < RMC_BUCKETS; i++)
- INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
+ INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
return 0;
}
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
{
struct mesh_rmc *rmc = sdata->u.mesh.rmc;
- struct rmc_entry *p, *n;
+ struct rmc_entry *p;
+ struct hlist_node *n;
int i;
if (!sdata->u.mesh.rmc)
return;
for (i = 0; i < RMC_BUCKETS; i++) {
- list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
- list_del(&p->list);
+ hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
+ hlist_del(&p->list);
kmem_cache_free(rm_cache, p);
}
}
@@ -220,16 +219,20 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
u32 seqnum = 0;
int entries = 0;
u8 idx;
- struct rmc_entry *p, *n;
+ struct rmc_entry *p;
+ struct hlist_node *n;
+
+ if (!rmc)
+ return -1;
/* Don't care about endianness since only match matters */
memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
- list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
+ hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
++entries;
if (time_after(jiffies, p->exp_time) ||
entries == RMC_QUEUE_MAX_LEN) {
- list_del(&p->list);
+ hlist_del(&p->list);
kmem_cache_free(rm_cache, p);
--entries;
} else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa))
@@ -243,7 +246,7 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
p->seqnum = seqnum;
p->exp_time = jiffies + RMC_TIMEOUT;
memcpy(p->sa, sa, ETH_ALEN);
- list_add(&p->list, &rmc->bucket[idx]);
+ hlist_add_head(&p->list, &rmc->bucket[idx]);
return 0;
}
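[Note: the recent-multicast cache above now uses hlist buckets: entries are hashed by the low bits of the mesh sequence number, stale entries are purged while walking the bucket, and a repeated (seqnum, source) pair is reported as a duplicate. A compact userspace sketch of that logic, with simplified timing and list handling:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define BUCKETS	16
#define TIMEOUT	3		/* seconds; stand-in for the jiffies-based RMC_TIMEOUT */

struct entry {
	struct entry *next;
	unsigned int seqnum;
	unsigned char sa[6];
	time_t exp;
};

static struct entry *bucket[BUCKETS];

/* Returns 1 for a duplicate, 0 (and records the frame) otherwise. */
static int rmc_check(const unsigned char *sa, unsigned int seqnum)
{
	struct entry **pp = &bucket[seqnum & (BUCKETS - 1)], *p;
	time_t now = time(NULL);

	while ((p = *pp)) {
		if (now > p->exp) {		/* expired: unlink and free */
			*pp = p->next;
			free(p);
			continue;
		}
		if (p->seqnum == seqnum && !memcmp(p->sa, sa, 6))
			return 1;
		pp = &p->next;
	}

	p = malloc(sizeof(*p));
	if (!p)
		return 0;
	p->seqnum = seqnum;
	memcpy(p->sa, sa, 6);
	p->exp = now + TIMEOUT;
	p->next = bucket[seqnum & (BUCKETS - 1)];
	bucket[seqnum & (BUCKETS - 1)] = p;
	return 0;
}

int main(void)
{
	const unsigned char src[6] = { 0x02, 0, 0, 0, 0, 0x01 };

	printf("first copy:  %d\n", rmc_check(src, 42));
	printf("second copy: %d\n", rmc_check(src, 42));
	return 0;
}
]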
@@ -1348,12 +1351,6 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
mesh_path_start_discovery(sdata);
- if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
- mesh_mpath_table_grow();
-
- if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
- mesh_mpp_table_grow();
-
if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
ieee80211_mesh_housekeeping(sdata);
@@ -1388,6 +1385,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
+
+ mesh_pathtbl_init(sdata);
+
setup_timer(&ifmsh->mesh_path_timer,
ieee80211_mesh_path_timer,
(unsigned long) sdata);
@@ -1402,3 +1402,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
sdata->vif.bss_conf.bssid = zero_addr;
}
+
+void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata)
+{
+ mesh_rmc_free(sdata);
+ mesh_pathtbl_unregister(sdata);
+}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 87c017a3b1ce..26b9ccbe1fce 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -21,8 +21,6 @@
/**
* enum mesh_path_flags - mac80211 mesh path flags
*
- *
- *
* @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
* @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
* @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
@@ -32,6 +30,8 @@
* @MESH_PATH_RESOLVED: the mesh path can has been resolved
* @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
* already queued up, waiting for the discovery process to start.
+ * @MESH_PATH_DELETED: the mesh path has been deleted and should no longer
+ * be used
*
* MESH_PATH_RESOLVED is used by the mesh path timer to
* decide when to stop or cancel the mesh path discovery.
@@ -43,6 +43,7 @@ enum mesh_path_flags {
MESH_PATH_FIXED = BIT(3),
MESH_PATH_RESOLVED = BIT(4),
MESH_PATH_REQ_QUEUED = BIT(5),
+ MESH_PATH_DELETED = BIT(6),
};
/**
@@ -51,10 +52,6 @@ enum mesh_path_flags {
*
*
* @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
- * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
- * to grow.
- * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
- * grow
* @MESH_WORK_ROOT: the mesh root station needs to send a frame
* @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
* mesh nodes
@@ -62,8 +59,6 @@ enum mesh_path_flags {
*/
enum mesh_deferred_task_flags {
MESH_WORK_HOUSEKEEPING,
- MESH_WORK_GROW_MPATH_TABLE,
- MESH_WORK_GROW_MPP_TABLE,
MESH_WORK_ROOT,
MESH_WORK_DRIFT_ADJUST,
MESH_WORK_MBSS_CHANGED,
@@ -73,12 +68,16 @@ enum mesh_deferred_task_flags {
* struct mesh_path - mac80211 mesh path structure
*
* @dst: mesh path destination mac address
+ * @mpp: mesh proxy mac address
+ * @rhash: rhashtable list pointer
+ * @gate_list: list pointer for known gates list
* @sdata: mesh subif
* @next_hop: mesh neighbor to which frames for this destination will be
* forwarded
* @timer: mesh path discovery timer
* @frame_queue: pending queue for frames sent to this destination while the
* path is unresolved
+ * @rcu: rcu head for freeing mesh path
* @sn: target sequence number
* @metric: current metric to this destination
* @hop_count: hops to destination
@@ -97,14 +96,16 @@ enum mesh_deferred_task_flags {
* @is_gate: the destination station of this path is a mesh gate
*
*
- * The combination of dst and sdata is unique in the mesh path table. Since the
- * next_hop STA is only protected by RCU as well, deleting the STA must also
- * remove/substitute the mesh_path structure and wait until that is no longer
- * reachable before destroying the STA completely.
+ * The dst address is unique in the mesh path table. Since the mesh_path is
+ * protected by RCU, deleting the next_hop STA must remove / substitute the
+ * mesh_path structure and wait until that is no longer reachable before
+ * destroying the STA completely.
*/
struct mesh_path {
u8 dst[ETH_ALEN];
u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
+ struct rhash_head rhash;
+ struct hlist_node gate_list;
struct ieee80211_sub_if_data *sdata;
struct sta_info __rcu *next_hop;
struct timer_list timer;
@@ -128,34 +129,17 @@ struct mesh_path {
/**
* struct mesh_table
*
- * @hash_buckets: array of hash buckets of the table
- * @hashwlock: array of locks to protect write operations, one per bucket
- * @hash_mask: 2^size_order - 1, used to compute hash idx
- * @hash_rnd: random value used for hash computations
- * @entries: number of entries in the table
- * @free_node: function to free nodes of the table
- * @copy_node: function to copy nodes of the table
- * @size_order: determines size of the table, there will be 2^size_order hash
- * buckets
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
- *
- * rcu_head: RCU head to free the table
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @entries: number of entries in the table
*/
struct mesh_table {
- /* Number of buckets will be 2^N */
- struct hlist_head *hash_buckets;
- spinlock_t *hashwlock; /* One per bucket, for add/del */
- unsigned int hash_mask; /* (2^size_order) - 1 */
- __u32 hash_rnd; /* Used for hash generation */
- atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
- void (*free_node) (struct hlist_node *p, bool free_leafs);
- int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
- int size_order;
- struct hlist_head *known_gates;
+ struct hlist_head known_gates;
spinlock_t gates_lock;
-
- struct rcu_head rcu_head;
+ struct rhashtable rhead;
+ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
/* Recent multicast cache */
@@ -170,20 +154,21 @@ struct mesh_table {
* @seqnum: mesh sequence number of the frame
* @exp_time: expiration time of the entry, in jiffies
* @sa: source address of the frame
+ * @list: hashtable list pointer
*
* The Recent Multicast Cache keeps track of the latest multicast frames that
* have been received by a mesh interface and discards received multicast frames
* that are found in the cache.
*/
struct rmc_entry {
- struct list_head list;
- u32 seqnum;
+ struct hlist_node list;
unsigned long exp_time;
+ u32 seqnum;
u8 sa[ETH_ALEN];
};
struct mesh_rmc {
- struct list_head bucket[RMC_BUCKETS];
+ struct hlist_head bucket[RMC_BUCKETS];
u32 idx_mask;
};
@@ -234,6 +219,7 @@ void ieee80211s_init(void);
void ieee80211s_update_metric(struct ieee80211_local *local,
struct sta_info *sta, struct sk_buff *skb);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
@@ -299,9 +285,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
void mesh_sta_cleanup(struct sta_info *sta);
/* Private interfaces */
-/* Mesh tables */
-void mesh_mpath_table_grow(void);
-void mesh_mpp_table_grow(void);
/* Mesh paths */
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
u8 ttl, const u8 *target, u32 target_sn,
@@ -309,8 +292,8 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(void);
-void mesh_pathtbl_unregister(void);
+int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
void mesh_path_timer(unsigned long data);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
@@ -319,8 +302,6 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
-extern int mesh_paths_generation;
-extern int mpp_paths_generation;
#ifdef CONFIG_MAC80211_MESH
static inline
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 5b6aec1a0630..8f9c3bde835f 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
const u8 *target_addr, *orig_addr;
const u8 *da;
u8 target_flags, ttl, flags;
- u32 orig_sn, target_sn, lifetime, target_metric;
+ u32 orig_sn, target_sn, lifetime, target_metric = 0;
bool reply = false;
bool forward = true;
bool root_is_gate;
@@ -1012,6 +1012,10 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
goto enddiscovery;
spin_lock_bh(&mpath->state_lock);
+ if (mpath->flags & MESH_PATH_DELETED) {
+ spin_unlock_bh(&mpath->state_lock);
+ goto enddiscovery;
+ }
mpath->flags &= ~MESH_PATH_REQ_QUEUED;
if (preq_node->flags & PREQ_Q_F_START) {
if (mpath->flags & MESH_PATH_RESOLVING) {
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 2ba7aa56b11c..6db2ddfa0695 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -18,11 +18,22 @@
#include "ieee80211_i.h"
#include "mesh.h"
-/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
-#define INIT_PATHS_SIZE_ORDER 2
+static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
-/* Keep the mean chain length below this constant */
-#define MEAN_CHAIN_LEN 2
+static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
+{
+ /* Use last four bytes of hw addr as hash index */
+ return jhash_1word(*(u32 *)(addr+2), seed);
+}
+
+static const struct rhashtable_params mesh_rht_params = {
+ .nelem_hint = 2,
+ .automatic_shrinking = true,
+ .key_len = ETH_ALEN,
+ .key_offset = offsetof(struct mesh_path, dst),
+ .head_offset = offsetof(struct mesh_path, rhash),
+ .hashfn = mesh_table_hash,
+};
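[Note: the mesh path table becomes an rhashtable keyed by the destination MAC, with a custom hash that feeds only the last four bytes of the address to jhash_1word(). A sketch of that hashing choice; the mixer below is a stand-in for jhash, purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for jhash_1word(): any reasonable 32-bit mixer works here. */
static uint32_t mix32(uint32_t v, uint32_t seed)
{
	v ^= seed;
	v *= 0x9e3779b1u;
	return v ^ (v >> 16);
}

static uint32_t mesh_hash(const uint8_t addr[6], uint32_t seed)
{
	uint32_t w;

	memcpy(&w, addr + 2, sizeof(w));	/* last four bytes of the MAC */
	return mix32(w, seed);
}

int main(void)
{
	const uint8_t dst[6] = { 0x02, 0x00, 0xde, 0xad, 0xbe, 0xef };

	printf("hash for %02x:%02x:%02x:%02x:%02x:%02x -> %#x\n",
	       dst[0], dst[1], dst[2], dst[3], dst[4], dst[5],
	       mesh_hash(dst, 0x12345678u));
	return 0;
}
]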
static inline bool mpath_expired(struct mesh_path *mpath)
{
@@ -31,173 +42,36 @@ static inline bool mpath_expired(struct mesh_path *mpath)
!(mpath->flags & MESH_PATH_FIXED);
}
-struct mpath_node {
- struct hlist_node list;
- struct rcu_head rcu;
- /* This indirection allows two different tables to point to the same
- * mesh_path structure, useful when resizing
- */
- struct mesh_path *mpath;
-};
-
-static struct mesh_table __rcu *mesh_paths;
-static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
-
-int mesh_paths_generation;
-int mpp_paths_generation;
-
-/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. RCU provides sufficient protection only when reading the table
- * (i.e. doing lookups). Adding or adding or removing nodes requires we take
- * the read lock or we risk operating on an old table. The write lock is only
- * needed when modifying the number of buckets a table.
- */
-static DEFINE_RWLOCK(pathtbl_resize_lock);
-
-
-static inline struct mesh_table *resize_dereference_paths(
- struct mesh_table __rcu *table)
+static void mesh_path_rht_free(void *ptr, void *tblptr)
{
- return rcu_dereference_protected(table,
- lockdep_is_held(&pathtbl_resize_lock));
-}
+ struct mesh_path *mpath = ptr;
+ struct mesh_table *tbl = tblptr;
-static inline struct mesh_table *resize_dereference_mesh_paths(void)
-{
- return resize_dereference_paths(mesh_paths);
+ mesh_path_free_rcu(tbl, mpath);
}
-static inline struct mesh_table *resize_dereference_mpp_paths(void)
+static struct mesh_table *mesh_table_alloc(void)
{
- return resize_dereference_paths(mpp_paths);
-}
-
-/*
- * CAREFUL -- "tbl" must not be an expression,
- * in particular not an rcu_dereference(), since
- * it's used twice. So it is illegal to do
- * for_each_mesh_entry(rcu_dereference(...), ...)
- */
-#define for_each_mesh_entry(tbl, node, i) \
- for (i = 0; i <= tbl->hash_mask; i++) \
- hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
-
-
-static struct mesh_table *mesh_table_alloc(int size_order)
-{
- int i;
struct mesh_table *newtbl;
newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
if (!newtbl)
return NULL;
- newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
- (1 << size_order), GFP_ATOMIC);
-
- if (!newtbl->hash_buckets) {
- kfree(newtbl);
- return NULL;
- }
-
- newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
- (1 << size_order), GFP_ATOMIC);
- if (!newtbl->hashwlock) {
- kfree(newtbl->hash_buckets);
- kfree(newtbl);
- return NULL;
- }
-
- newtbl->size_order = size_order;
- newtbl->hash_mask = (1 << size_order) - 1;
+ INIT_HLIST_HEAD(&newtbl->known_gates);
atomic_set(&newtbl->entries, 0);
- get_random_bytes(&newtbl->hash_rnd,
- sizeof(newtbl->hash_rnd));
- for (i = 0; i <= newtbl->hash_mask; i++)
- spin_lock_init(&newtbl->hashwlock[i]);
spin_lock_init(&newtbl->gates_lock);
return newtbl;
}
-static void __mesh_table_free(struct mesh_table *tbl)
+static void mesh_table_free(struct mesh_table *tbl)
{
- kfree(tbl->hash_buckets);
- kfree(tbl->hashwlock);
+ rhashtable_free_and_destroy(&tbl->rhead,
+ mesh_path_rht_free, tbl);
kfree(tbl);
}
-static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
-{
- struct hlist_head *mesh_hash;
- struct hlist_node *p, *q;
- struct mpath_node *gate;
- int i;
-
- mesh_hash = tbl->hash_buckets;
- for (i = 0; i <= tbl->hash_mask; i++) {
- spin_lock_bh(&tbl->hashwlock[i]);
- hlist_for_each_safe(p, q, &mesh_hash[i]) {
- tbl->free_node(p, free_leafs);
- atomic_dec(&tbl->entries);
- }
- spin_unlock_bh(&tbl->hashwlock[i]);
- }
- if (free_leafs) {
- spin_lock_bh(&tbl->gates_lock);
- hlist_for_each_entry_safe(gate, q,
- tbl->known_gates, list) {
- hlist_del(&gate->list);
- kfree(gate);
- }
- kfree(tbl->known_gates);
- spin_unlock_bh(&tbl->gates_lock);
- }
-
- __mesh_table_free(tbl);
-}
-
-static int mesh_table_grow(struct mesh_table *oldtbl,
- struct mesh_table *newtbl)
-{
- struct hlist_head *oldhash;
- struct hlist_node *p, *q;
- int i;
-
- if (atomic_read(&oldtbl->entries)
- < MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
- return -EAGAIN;
-
- newtbl->free_node = oldtbl->free_node;
- newtbl->copy_node = oldtbl->copy_node;
- newtbl->known_gates = oldtbl->known_gates;
- atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
-
- oldhash = oldtbl->hash_buckets;
- for (i = 0; i <= oldtbl->hash_mask; i++)
- hlist_for_each(p, &oldhash[i])
- if (oldtbl->copy_node(p, newtbl) < 0)
- goto errcopy;
-
- return 0;
-
-errcopy:
- for (i = 0; i <= newtbl->hash_mask; i++) {
- hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
- oldtbl->free_node(p, 0);
- }
- return -ENOMEM;
-}
-
-static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
- struct mesh_table *tbl)
-{
- /* Use last four bytes of hw addr and interface index as hash index */
- return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
- tbl->hash_rnd) & tbl->hash_mask;
-}
-
-
/**
*
* mesh_path_assign_nexthop - update mesh path next hop
@@ -340,23 +214,15 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
- struct hlist_head *bucket;
- struct mpath_node *node;
-
- bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
- hlist_for_each_entry_rcu(node, bucket, list) {
- mpath = node->mpath;
- if (mpath->sdata == sdata &&
- ether_addr_equal(dst, mpath->dst)) {
- if (mpath_expired(mpath)) {
- spin_lock_bh(&mpath->state_lock);
- mpath->flags &= ~MESH_PATH_ACTIVE;
- spin_unlock_bh(&mpath->state_lock);
- }
- return mpath;
- }
+
+ mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params);
+
+ if (mpath && mpath_expired(mpath)) {
+ spin_lock_bh(&mpath->state_lock);
+ mpath->flags &= ~MESH_PATH_ACTIVE;
+ spin_unlock_bh(&mpath->state_lock);
}
- return NULL;
+ return mpath;
}
/**
@@ -371,15 +237,52 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
+ return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
+ return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}
+static struct mesh_path *
+__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
+{
+ int i = 0, ret;
+ struct mesh_path *mpath = NULL;
+ struct rhashtable_iter iter;
+
+ ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+ if (ret)
+ return NULL;
+
+ ret = rhashtable_walk_start(&iter);
+ if (ret && ret != -EAGAIN)
+ goto err;
+
+ while ((mpath = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+ continue;
+ if (IS_ERR(mpath))
+ break;
+ if (i++ == idx)
+ break;
+ }
+err:
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+
+ if (IS_ERR(mpath) || !mpath)
+ return NULL;
+
+ if (mpath_expired(mpath)) {
+ spin_lock_bh(&mpath->state_lock);
+ mpath->flags &= ~MESH_PATH_ACTIVE;
+ spin_unlock_bh(&mpath->state_lock);
+ }
+ return mpath;
+}
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -393,25 +296,7 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- struct mesh_table *tbl = rcu_dereference(mesh_paths);
- struct mpath_node *node;
- int i;
- int j = 0;
-
- for_each_mesh_entry(tbl, node, i) {
- if (sdata && node->mpath->sdata != sdata)
- continue;
- if (j++ == idx) {
- if (mpath_expired(node->mpath)) {
- spin_lock_bh(&node->mpath->state_lock);
- node->mpath->flags &= ~MESH_PATH_ACTIVE;
- spin_unlock_bh(&node->mpath->state_lock);
- }
- return node->mpath;
- }
- }
-
- return NULL;
+ return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}
/**
@@ -426,19 +311,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- struct mesh_table *tbl = rcu_dereference(mpp_paths);
- struct mpath_node *node;
- int i;
- int j = 0;
-
- for_each_mesh_entry(tbl, node, i) {
- if (sdata && node->mpath->sdata != sdata)
- continue;
- if (j++ == idx)
- return node->mpath;
- }
-
- return NULL;
+ return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}
/**
@@ -448,30 +321,26 @@ mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
int mesh_path_add_gate(struct mesh_path *mpath)
{
struct mesh_table *tbl;
- struct mpath_node *gate, *new_gate;
int err;
rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
-
- hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
- if (gate->mpath == mpath) {
- err = -EEXIST;
- goto err_rcu;
- }
+ tbl = mpath->sdata->u.mesh.mesh_paths;
- new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
- if (!new_gate) {
- err = -ENOMEM;
+ spin_lock_bh(&mpath->state_lock);
+ if (mpath->is_gate) {
+ err = -EEXIST;
+ spin_unlock_bh(&mpath->state_lock);
goto err_rcu;
}
-
mpath->is_gate = true;
mpath->sdata->u.mesh.num_gates++;
- new_gate->mpath = mpath;
- spin_lock_bh(&tbl->gates_lock);
- hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
- spin_unlock_bh(&tbl->gates_lock);
+
+ spin_lock(&tbl->gates_lock);
+ hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
+ spin_unlock(&tbl->gates_lock);
+
+ spin_unlock_bh(&mpath->state_lock);
+
mpath_dbg(mpath->sdata,
"Mesh path: Recorded new gate: %pM. %d known gates\n",
mpath->dst, mpath->sdata->u.mesh.num_gates);
@@ -485,28 +354,22 @@ err_rcu:
* mesh_gate_del - remove a mesh gate from the list of known gates
* @tbl: table which holds our list of known gates
* @mpath: gate mpath
- *
- * Locking: must be called inside rcu_read_lock() section
*/
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
- struct mpath_node *gate;
- struct hlist_node *q;
+ lockdep_assert_held(&mpath->state_lock);
+ if (!mpath->is_gate)
+ return;
- hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
- if (gate->mpath != mpath)
- continue;
- spin_lock_bh(&tbl->gates_lock);
- hlist_del_rcu(&gate->list);
- kfree_rcu(gate, rcu);
- spin_unlock_bh(&tbl->gates_lock);
- mpath->sdata->u.mesh.num_gates--;
- mpath->is_gate = false;
- mpath_dbg(mpath->sdata,
- "Mesh path: Deleted gate: %pM. %d known gates\n",
- mpath->dst, mpath->sdata->u.mesh.num_gates);
- break;
- }
+ mpath->is_gate = false;
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_del_rcu(&mpath->gate_list);
+ mpath->sdata->u.mesh.num_gates--;
+ spin_unlock_bh(&tbl->gates_lock);
+
+ mpath_dbg(mpath->sdata,
+ "Mesh path: Deleted gate: %pM. %d known gates\n",
+ mpath->dst, mpath->sdata->u.mesh.num_gates);
}
/**
@@ -518,6 +381,31 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
return sdata->u.mesh.num_gates;
}
+static
+struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
+ const u8 *dst, gfp_t gfp_flags)
+{
+ struct mesh_path *new_mpath;
+
+ new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
+ if (!new_mpath)
+ return NULL;
+
+ memcpy(new_mpath->dst, dst, ETH_ALEN);
+ eth_broadcast_addr(new_mpath->rann_snd_addr);
+ new_mpath->is_root = false;
+ new_mpath->sdata = sdata;
+ new_mpath->flags = 0;
+ skb_queue_head_init(&new_mpath->frame_queue);
+ new_mpath->timer.data = (unsigned long) new_mpath;
+ new_mpath->timer.function = mesh_path_timer;
+ new_mpath->exp_time = jiffies;
+ spin_lock_init(&new_mpath->state_lock);
+ init_timer(&new_mpath->timer);
+
+ return new_mpath;
+}
+
/**
* mesh_path_add - allocate and add a new path to the mesh path table
* @dst: destination address of the path (ETH_ALEN length)
@@ -530,15 +418,9 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
const u8 *dst)
{
- struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_local *local = sdata->local;
struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath;
- struct mpath_node *node, *new_node;
- struct hlist_head *bucket;
- int grow = 0;
- int err;
- u32 hash_idx;
+ int ret;
if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
@@ -550,129 +432,44 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
return ERR_PTR(-ENOSPC);
- read_lock_bh(&pathtbl_resize_lock);
- tbl = resize_dereference_mesh_paths();
-
- hash_idx = mesh_table_hash(dst, sdata, tbl);
- bucket = &tbl->hash_buckets[hash_idx];
-
- spin_lock(&tbl->hashwlock[hash_idx]);
-
- hlist_for_each_entry(node, bucket, list) {
- mpath = node->mpath;
- if (mpath->sdata == sdata &&
- ether_addr_equal(dst, mpath->dst))
- goto found;
- }
-
- err = -ENOMEM;
- new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
+ new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
if (!new_mpath)
- goto err_path_alloc;
-
- new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
- if (!new_node)
- goto err_node_alloc;
-
- memcpy(new_mpath->dst, dst, ETH_ALEN);
- eth_broadcast_addr(new_mpath->rann_snd_addr);
- new_mpath->is_root = false;
- new_mpath->sdata = sdata;
- new_mpath->flags = 0;
- skb_queue_head_init(&new_mpath->frame_queue);
- new_node->mpath = new_mpath;
- new_mpath->timer.data = (unsigned long) new_mpath;
- new_mpath->timer.function = mesh_path_timer;
- new_mpath->exp_time = jiffies;
- spin_lock_init(&new_mpath->state_lock);
- init_timer(&new_mpath->timer);
-
- hlist_add_head_rcu(&new_node->list, bucket);
- if (atomic_inc_return(&tbl->entries) >=
- MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
- grow = 1;
-
- mesh_paths_generation++;
-
- if (grow) {
- set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
- ieee80211_queue_work(&local->hw, &sdata->work);
- }
- mpath = new_mpath;
-found:
- spin_unlock(&tbl->hashwlock[hash_idx]);
- read_unlock_bh(&pathtbl_resize_lock);
- return mpath;
+ return ERR_PTR(-ENOMEM);
-err_node_alloc:
- kfree(new_mpath);
-err_path_alloc:
- atomic_dec(&sdata->u.mesh.mpaths);
- spin_unlock(&tbl->hashwlock[hash_idx]);
- read_unlock_bh(&pathtbl_resize_lock);
- return ERR_PTR(err);
-}
+ tbl = sdata->u.mesh.mesh_paths;
+ do {
+ ret = rhashtable_lookup_insert_fast(&tbl->rhead,
+ &new_mpath->rhash,
+ mesh_rht_params);
-static void mesh_table_free_rcu(struct rcu_head *rcu)
-{
- struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
+ if (ret == -EEXIST)
+ mpath = rhashtable_lookup_fast(&tbl->rhead,
+ dst,
+ mesh_rht_params);
- mesh_table_free(tbl, false);
-}
+ } while (unlikely(ret == -EEXIST && !mpath));
-void mesh_mpath_table_grow(void)
-{
- struct mesh_table *oldtbl, *newtbl;
-
- write_lock_bh(&pathtbl_resize_lock);
- oldtbl = resize_dereference_mesh_paths();
- newtbl = mesh_table_alloc(oldtbl->size_order + 1);
- if (!newtbl)
- goto out;
- if (mesh_table_grow(oldtbl, newtbl) < 0) {
- __mesh_table_free(newtbl);
- goto out;
- }
- rcu_assign_pointer(mesh_paths, newtbl);
-
- call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
-
- out:
- write_unlock_bh(&pathtbl_resize_lock);
-}
+ if (ret && ret != -EEXIST)
+ return ERR_PTR(ret);
-void mesh_mpp_table_grow(void)
-{
- struct mesh_table *oldtbl, *newtbl;
-
- write_lock_bh(&pathtbl_resize_lock);
- oldtbl = resize_dereference_mpp_paths();
- newtbl = mesh_table_alloc(oldtbl->size_order + 1);
- if (!newtbl)
- goto out;
- if (mesh_table_grow(oldtbl, newtbl) < 0) {
- __mesh_table_free(newtbl);
- goto out;
+ /* At this point either new_mpath was added, or we found a
+ * matching entry already in the table; in the latter case
+ * free the unnecessary new entry.
+ */
+ if (ret == -EEXIST) {
+ kfree(new_mpath);
+ new_mpath = mpath;
}
- rcu_assign_pointer(mpp_paths, newtbl);
- call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
-
- out:
- write_unlock_bh(&pathtbl_resize_lock);
+ sdata->u.mesh.mesh_paths_generation++;
+ return new_mpath;
}
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
const u8 *dst, const u8 *mpp)
{
- struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_local *local = sdata->local;
struct mesh_table *tbl;
- struct mesh_path *mpath, *new_mpath;
- struct mpath_node *node, *new_node;
- struct hlist_head *bucket;
- int grow = 0;
- int err = 0;
- u32 hash_idx;
+ struct mesh_path *new_mpath;
+ int ret;
if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
@@ -681,65 +478,19 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(dst))
return -ENOTSUPP;
- err = -ENOMEM;
- new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
- if (!new_mpath)
- goto err_path_alloc;
+ new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
- new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
- if (!new_node)
- goto err_node_alloc;
+ if (!new_mpath)
+ return -ENOMEM;
- read_lock_bh(&pathtbl_resize_lock);
- memcpy(new_mpath->dst, dst, ETH_ALEN);
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
- new_mpath->sdata = sdata;
- new_mpath->flags = 0;
- skb_queue_head_init(&new_mpath->frame_queue);
- new_node->mpath = new_mpath;
- init_timer(&new_mpath->timer);
- new_mpath->exp_time = jiffies;
- spin_lock_init(&new_mpath->state_lock);
-
- tbl = resize_dereference_mpp_paths();
+ tbl = sdata->u.mesh.mpp_paths;
+ ret = rhashtable_lookup_insert_fast(&tbl->rhead,
+ &new_mpath->rhash,
+ mesh_rht_params);
- hash_idx = mesh_table_hash(dst, sdata, tbl);
- bucket = &tbl->hash_buckets[hash_idx];
-
- spin_lock(&tbl->hashwlock[hash_idx]);
-
- err = -EEXIST;
- hlist_for_each_entry(node, bucket, list) {
- mpath = node->mpath;
- if (mpath->sdata == sdata &&
- ether_addr_equal(dst, mpath->dst))
- goto err_exists;
- }
-
- hlist_add_head_rcu(&new_node->list, bucket);
- if (atomic_inc_return(&tbl->entries) >=
- MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
- grow = 1;
-
- spin_unlock(&tbl->hashwlock[hash_idx]);
- read_unlock_bh(&pathtbl_resize_lock);
-
- mpp_paths_generation++;
-
- if (grow) {
- set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
- ieee80211_queue_work(&local->hw, &sdata->work);
- }
- return 0;
-
-err_exists:
- spin_unlock(&tbl->hashwlock[hash_idx]);
- read_unlock_bh(&pathtbl_resize_lock);
- kfree(new_node);
-err_node_alloc:
- kfree(new_mpath);
-err_path_alloc:
- return err;
+ sdata->u.mesh.mpp_paths_generation++;
+ return ret;
}
@@ -753,17 +504,26 @@ err_path_alloc:
*/
void mesh_plink_broken(struct sta_info *sta)
{
- struct mesh_table *tbl;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
- struct mpath_node *node;
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- int i;
+ struct rhashtable_iter iter;
+ int ret;
- rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
- for_each_mesh_entry(tbl, node, i) {
- mpath = node->mpath;
+ ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+ if (ret)
+ return;
+
+ ret = rhashtable_walk_start(&iter);
+ if (ret && ret != -EAGAIN)
+ goto out;
+
+ while ((mpath = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+ continue;
+ if (IS_ERR(mpath))
+ break;
if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
@@ -777,33 +537,30 @@ void mesh_plink_broken(struct sta_info *sta)
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
}
}
- rcu_read_unlock();
+out:
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
}
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+static void mesh_path_free_rcu(struct mesh_table *tbl,
+ struct mesh_path *mpath)
{
- struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
- del_timer_sync(&node->mpath->timer);
- kfree(node->mpath);
- kfree(node);
+ spin_lock_bh(&mpath->state_lock);
+ mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
+ mesh_gate_del(tbl, mpath);
+ spin_unlock_bh(&mpath->state_lock);
+ del_timer_sync(&mpath->timer);
+ atomic_dec(&sdata->u.mesh.mpaths);
+ atomic_dec(&tbl->entries);
+ kfree_rcu(mpath, rcu);
}
-/* needs to be called with the corresponding hashwlock taken */
-static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
- struct mesh_path *mpath = node->mpath;
- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
-
- spin_lock(&mpath->state_lock);
- mpath->flags |= MESH_PATH_RESOLVING;
- if (mpath->is_gate)
- mesh_gate_del(tbl, mpath);
- hlist_del_rcu(&node->list);
- call_rcu(&node->rcu, mesh_path_node_reclaim);
- spin_unlock(&mpath->state_lock);
- atomic_dec(&sdata->u.mesh.mpaths);
- atomic_dec(&tbl->entries);
+ rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
+ mesh_path_free_rcu(tbl, mpath);
}
/**
@@ -819,65 +576,88 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
*/
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
- struct mesh_table *tbl;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
- struct mpath_node *node;
- int i;
+ struct rhashtable_iter iter;
+ int ret;
- rcu_read_lock();
- read_lock_bh(&pathtbl_resize_lock);
- tbl = resize_dereference_mesh_paths();
- for_each_mesh_entry(tbl, node, i) {
- mpath = node->mpath;
- if (rcu_access_pointer(mpath->next_hop) == sta) {
- spin_lock(&tbl->hashwlock[i]);
- __mesh_path_del(tbl, node);
- spin_unlock(&tbl->hashwlock[i]);
- }
+ ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+ if (ret)
+ return;
+
+ ret = rhashtable_walk_start(&iter);
+ if (ret && ret != -EAGAIN)
+ goto out;
+
+ while ((mpath = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+ continue;
+ if (IS_ERR(mpath))
+ break;
+
+ if (rcu_access_pointer(mpath->next_hop) == sta)
+ __mesh_path_del(tbl, mpath);
}
- read_unlock_bh(&pathtbl_resize_lock);
- rcu_read_unlock();
+out:
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
}
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy)
{
- struct mesh_table *tbl;
- struct mesh_path *mpp;
- struct mpath_node *node;
- int i;
+ struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+ struct mesh_path *mpath;
+ struct rhashtable_iter iter;
+ int ret;
- rcu_read_lock();
- read_lock_bh(&pathtbl_resize_lock);
- tbl = resize_dereference_mpp_paths();
- for_each_mesh_entry(tbl, node, i) {
- mpp = node->mpath;
- if (ether_addr_equal(mpp->mpp, proxy)) {
- spin_lock(&tbl->hashwlock[i]);
- __mesh_path_del(tbl, node);
- spin_unlock(&tbl->hashwlock[i]);
- }
+ ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+ if (ret)
+ return;
+
+ ret = rhashtable_walk_start(&iter);
+ if (ret && ret != -EAGAIN)
+ goto out;
+
+ while ((mpath = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+ continue;
+ if (IS_ERR(mpath))
+ break;
+
+ if (ether_addr_equal(mpath->mpp, proxy))
+ __mesh_path_del(tbl, mpath);
}
- read_unlock_bh(&pathtbl_resize_lock);
- rcu_read_unlock();
+out:
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
}
-static void table_flush_by_iface(struct mesh_table *tbl,
- struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct mpath_node *node;
- int i;
+ struct rhashtable_iter iter;
+ int ret;
+
+ ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+ if (ret)
+ return;
+
+ ret = rhashtable_walk_start(&iter);
+ if (ret && ret != -EAGAIN)
+ goto out;
- WARN_ON(!rcu_read_lock_held());
- for_each_mesh_entry(tbl, node, i) {
- mpath = node->mpath;
- if (mpath->sdata != sdata)
+ while ((mpath = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
- spin_lock_bh(&tbl->hashwlock[i]);
- __mesh_path_del(tbl, node);
- spin_unlock_bh(&tbl->hashwlock[i]);
+ if (IS_ERR(mpath))
+ break;
+ __mesh_path_del(tbl, mpath);
}
+out:
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
}
/**
@@ -890,16 +670,8 @@ static void table_flush_by_iface(struct mesh_table *tbl,
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
- struct mesh_table *tbl;
-
- rcu_read_lock();
- read_lock_bh(&pathtbl_resize_lock);
- tbl = resize_dereference_mesh_paths();
- table_flush_by_iface(tbl, sdata);
- tbl = resize_dereference_mpp_paths();
- table_flush_by_iface(tbl, sdata);
- read_unlock_bh(&pathtbl_resize_lock);
- rcu_read_unlock();
+ table_flush_by_iface(sdata->u.mesh.mesh_paths);
+ table_flush_by_iface(sdata->u.mesh.mpp_paths);
}
/**
@@ -911,37 +683,25 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
*
* Returns: 0 if successful
*/
-static int table_path_del(struct mesh_table __rcu *rcu_tbl,
+static int table_path_del(struct mesh_table *tbl,
struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
- struct mesh_table *tbl;
struct mesh_path *mpath;
- struct mpath_node *node;
- struct hlist_head *bucket;
- int hash_idx;
- int err = 0;
-
- tbl = resize_dereference_paths(rcu_tbl);
- hash_idx = mesh_table_hash(addr, sdata, tbl);
- bucket = &tbl->hash_buckets[hash_idx];
-
- spin_lock(&tbl->hashwlock[hash_idx]);
- hlist_for_each_entry(node, bucket, list) {
- mpath = node->mpath;
- if (mpath->sdata == sdata &&
- ether_addr_equal(addr, mpath->dst)) {
- __mesh_path_del(tbl, node);
- goto enddel;
- }
+
+ rcu_read_lock();
+ mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
+ if (!mpath) {
+ rcu_read_unlock();
+ return -ENXIO;
}
- err = -ENXIO;
-enddel:
- spin_unlock(&tbl->hashwlock[hash_idx]);
- return err;
+ __mesh_path_del(tbl, mpath);
+ rcu_read_unlock();
+ return 0;
}
+
/**
* mesh_path_del - delete a mesh path from the table
*
@@ -952,36 +712,13 @@ enddel:
*/
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
- int err = 0;
+ int err;
/* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr);
- read_lock_bh(&pathtbl_resize_lock);
- err = table_path_del(mesh_paths, sdata, addr);
- mesh_paths_generation++;
- read_unlock_bh(&pathtbl_resize_lock);
-
- return err;
-}
-
-/**
- * mpp_path_del - delete a mesh proxy path from the table
- *
- * @addr: addr address (ETH_ALEN length)
- * @sdata: local subif
- *
- * Returns: 0 if successful
- */
-static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
-{
- int err = 0;
-
- read_lock_bh(&pathtbl_resize_lock);
- err = table_path_del(mpp_paths, sdata, addr);
- mpp_paths_generation++;
- read_unlock_bh(&pathtbl_resize_lock);
-
+ err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+ sdata->u.mesh.mesh_paths_generation++;
return err;
}
@@ -1015,39 +752,30 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct ieee80211_sub_if_data *sdata = mpath->sdata;
struct mesh_table *tbl;
struct mesh_path *from_mpath = mpath;
- struct mpath_node *gate = NULL;
+ struct mesh_path *gate;
bool copy = false;
- struct hlist_head *known_gates;
-
- rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
- known_gates = tbl->known_gates;
- rcu_read_unlock();
- if (!known_gates)
- return -EHOSTUNREACH;
+ tbl = sdata->u.mesh.mesh_paths;
- hlist_for_each_entry_rcu(gate, known_gates, list) {
- if (gate->mpath->sdata != sdata)
- continue;
-
- if (gate->mpath->flags & MESH_PATH_ACTIVE) {
- mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
- mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
- from_mpath = gate->mpath;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
+ if (gate->flags & MESH_PATH_ACTIVE) {
+ mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
+ mesh_path_move_to_queue(gate, from_mpath, copy);
+ from_mpath = gate;
copy = true;
} else {
mpath_dbg(sdata,
"Not forwarding to %pM (flags %#x)\n",
- gate->mpath->dst, gate->mpath->flags);
+ gate->dst, gate->flags);
}
}
- hlist_for_each_entry_rcu(gate, known_gates, list)
- if (gate->mpath->sdata == sdata) {
- mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
- mesh_path_tx_pending(gate->mpath);
- }
+ hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
+ mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
+ mesh_path_tx_pending(gate);
+ }
+ rcu_read_unlock();
return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
@@ -1104,118 +832,73 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath);
}
-static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
-{
- struct mesh_path *mpath;
- struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
- mpath = node->mpath;
- hlist_del_rcu(p);
- if (free_leafs) {
- del_timer_sync(&mpath->timer);
- kfree(mpath);
- }
- kfree(node);
-}
-
-static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
-{
- struct mesh_path *mpath;
- struct mpath_node *node, *new_node;
- u32 hash_idx;
-
- new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
- if (new_node == NULL)
- return -ENOMEM;
-
- node = hlist_entry(p, struct mpath_node, list);
- mpath = node->mpath;
- new_node->mpath = mpath;
- hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
- hlist_add_head(&new_node->list,
- &newtbl->hash_buckets[hash_idx]);
- return 0;
-}
-
-int mesh_pathtbl_init(void)
+int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *tbl_path, *tbl_mpp;
int ret;
- tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+ tbl_path = mesh_table_alloc();
if (!tbl_path)
return -ENOMEM;
- tbl_path->free_node = &mesh_path_node_free;
- tbl_path->copy_node = &mesh_path_node_copy;
- tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
- if (!tbl_path->known_gates) {
- ret = -ENOMEM;
- goto free_path;
- }
- INIT_HLIST_HEAD(tbl_path->known_gates);
-
- tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+ tbl_mpp = mesh_table_alloc();
if (!tbl_mpp) {
ret = -ENOMEM;
goto free_path;
}
- tbl_mpp->free_node = &mesh_path_node_free;
- tbl_mpp->copy_node = &mesh_path_node_copy;
- tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
- if (!tbl_mpp->known_gates) {
- ret = -ENOMEM;
- goto free_mpp;
- }
- INIT_HLIST_HEAD(tbl_mpp->known_gates);
- /* Need no locking since this is during init */
- RCU_INIT_POINTER(mesh_paths, tbl_path);
- RCU_INIT_POINTER(mpp_paths, tbl_mpp);
+ rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
+ rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
+
+ sdata->u.mesh.mesh_paths = tbl_path;
+ sdata->u.mesh.mpp_paths = tbl_mpp;
return 0;
-free_mpp:
- mesh_table_free(tbl_mpp, true);
free_path:
- mesh_table_free(tbl_path, true);
+ mesh_table_free(tbl_path);
return ret;
}
-void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
+static
+void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
+ struct mesh_table *tbl)
{
- struct mesh_table *tbl;
struct mesh_path *mpath;
- struct mpath_node *node;
- int i;
+ struct rhashtable_iter iter;
+ int ret;
- rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
- for_each_mesh_entry(tbl, node, i) {
- if (node->mpath->sdata != sdata)
+ ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
+ if (ret)
+ return;
+
+ ret = rhashtable_walk_start(&iter);
+ if (ret && ret != -EAGAIN)
+ goto out;
+
+ while ((mpath = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
- mpath = node->mpath;
+ if (IS_ERR(mpath))
+ break;
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
- mesh_path_del(mpath->sdata, mpath->dst);
- }
-
- tbl = rcu_dereference(mpp_paths);
- for_each_mesh_entry(tbl, node, i) {
- if (node->mpath->sdata != sdata)
- continue;
- mpath = node->mpath;
- if ((!(mpath->flags & MESH_PATH_FIXED)) &&
- time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
- mpp_path_del(mpath->sdata, mpath->dst);
+ __mesh_path_del(tbl, mpath);
}
+out:
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
- rcu_read_unlock();
+void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
+{
+ mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
+ mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}
-void mesh_pathtbl_unregister(void)
+void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
- /* no need for locking during exit path */
- mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
- mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
+ mesh_table_free(sdata->u.mesh.mesh_paths);
+ mesh_table_free(sdata->u.mesh.mpp_paths);
}
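
The hunks above replace the old bucket-hash tables with a per-interface rhashtable: mesh_path_add() now calls rhashtable_lookup_insert_fast(), and on -EEXIST it looks up and reuses the entry that won the race, retrying in case a concurrent delete removed that entry between the failed insert and the lookup. Below is a minimal, single-threaded userspace sketch of that insert-or-reuse loop; demo_table, demo_insert and demo_lookup are illustrative stand-ins rather than kernel APIs, and the retry only matters in the kernel, where deletes can run concurrently.

/*
 * Sketch of the "insert, or reuse the existing entry" loop used by
 * mesh_path_add() on top of rhashtable.  Single-threaded model: the
 * retry branch never triggers here, it exists for the concurrent case.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_path {
	unsigned char dst[6];		/* lookup key */
	struct demo_path *next;
};

struct demo_table { struct demo_path *head; };

static struct demo_path *demo_lookup(struct demo_table *t, const unsigned char *dst)
{
	struct demo_path *p;

	for (p = t->head; p; p = p->next)
		if (!memcmp(p->dst, dst, 6))
			return p;
	return NULL;
}

/* Insert only if no entry with the same key exists (like *_lookup_insert_fast). */
static int demo_insert(struct demo_table *t, struct demo_path *new)
{
	if (demo_lookup(t, new->dst))
		return -EEXIST;
	new->next = t->head;
	t->head = new;
	return 0;
}

static struct demo_path *demo_path_add(struct demo_table *t, const unsigned char *dst)
{
	struct demo_path *mpath = NULL, *new = calloc(1, sizeof(*new));
	int ret;

	if (!new)
		return NULL;
	memcpy(new->dst, dst, 6);

	do {
		ret = demo_insert(t, new);
		if (ret == -EEXIST)
			mpath = demo_lookup(t, dst);
	} while (ret == -EEXIST && !mpath);	/* lost a race: try again */

	if (ret == -EEXIST) {			/* someone else added it first */
		free(new);
		return mpath;
	}
	return new;
}

int main(void)
{
	struct demo_table tbl = { NULL };
	unsigned char dst[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct demo_path *a = demo_path_add(&tbl, dst);
	struct demo_path *b = demo_path_add(&tbl, dst);

	printf("same entry reused: %s\n", a == b ? "yes" : "no");
	return 0;
}

mpp_path_add() keeps the simpler shape and just returns the rhashtable error to its caller instead of reusing an existing entry.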
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a07e93c21c9e..563bea050383 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -61,7 +61,7 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
return rssi_threshold == 0 ||
(sta &&
- (s8)-ewma_signal_read(&sta->rx_stats.avg_signal) >
+ (s8)-ewma_signal_read(&sta->rx_stats_avg.signal) >
rssi_threshold);
}
@@ -331,7 +331,9 @@ free:
*
* @sta: mesh peer link to deactivate
*
- * All mesh paths with this peer as next hop will be flushed
+ * Mesh paths with this peer as next hop should be flushed
+ * by the caller outside of plink_lock.
+ *
* Returns beacon changed flag if the beacon content changed.
*
* Locking: the caller must hold sta->mesh->plink_lock
@@ -346,7 +348,6 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta)
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
changed = mesh_plink_dec_estab_count(sdata);
sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
- mesh_path_flush_by_nexthop(sta);
ieee80211_mps_sta_status_update(sta);
changed |= ieee80211_mps_set_sta_local_pm(sta,
@@ -374,6 +375,7 @@ u32 mesh_plink_deactivate(struct sta_info *sta)
sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
sta->mesh->reason);
spin_unlock_bh(&sta->mesh->plink_lock);
+ mesh_path_flush_by_nexthop(sta);
return changed;
}
@@ -748,6 +750,7 @@ u32 mesh_plink_block(struct sta_info *sta)
changed = __mesh_plink_deactivate(sta);
sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
spin_unlock_bh(&sta->mesh->plink_lock);
+ mesh_path_flush_by_nexthop(sta);
return changed;
}
@@ -797,6 +800,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
enum ieee80211_self_protected_actioncode action = 0;
u32 changed = 0;
+ bool flush = false;
mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
mplstates[sta->mesh->plink_state], mplevents[event]);
@@ -885,6 +889,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
changed |= mesh_set_short_slot_time(sdata);
mesh_plink_close(sdata, sta, event);
action = WLAN_SP_MESH_PEERING_CLOSE;
+ flush = true;
break;
case OPN_ACPT:
action = WLAN_SP_MESH_PEERING_CONFIRM;
@@ -916,6 +921,8 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
break;
}
spin_unlock_bh(&sta->mesh->plink_lock);
+ if (flush)
+ mesh_path_flush_by_nexthop(sta);
if (action) {
mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
sta->mesh->llid, sta->mesh->plid,
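
The mesh_plink.c hunks move mesh_path_flush_by_nexthop() out from under plink_lock: the FSM only records that a flush is needed (the new flush bool) and performs it after the spinlock is dropped, since the path-table walk may block. A small sketch of that defer-until-unlocked pattern, with pthread names standing in for the kernel locking primitives and a plain function standing in for the flush:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t plink_lock = PTHREAD_MUTEX_INITIALIZER;
static int peer_state;

/* stands in for mesh_path_flush_by_nexthop(); may block, so it
 * must not run while plink_lock is held */
static void flush_paths_for_peer(int peer)
{
	printf("flushing paths via peer %d\n", peer);
}

static void close_peer(int peer)
{
	bool flush = false;

	pthread_mutex_lock(&plink_lock);
	if (peer_state != 0) {		/* e.g. the link was established */
		peer_state = 0;
		flush = true;		/* remember the work for later */
	}
	pthread_mutex_unlock(&plink_lock);

	if (flush)
		flush_paths_for_peer(peer);
}

int main(void)
{
	peer_state = 1;
	close_peer(7);
	return 0;
}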
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 281b8d6e5109..d3c75ac8a029 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -122,15 +122,16 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- if (unlikely(!sdata->u.mgd.associated))
+ if (unlikely(!ifmgd->associated))
return;
- ifmgd->probe_send_count = 0;
+ if (ifmgd->probe_send_count)
+ ifmgd->probe_send_count = 0;
if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
return;
- mod_timer(&sdata->u.mgd.conn_mon_timer,
+ mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
}
@@ -2216,6 +2217,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
const u8 *ssid;
u8 *dst = ifmgd->associated->bssid;
u8 unicast_limit = max(1, max_probe_tries - 3);
+ struct sta_info *sta;
/*
* Try sending broadcast probe requests for the last three
@@ -2234,6 +2236,14 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
*/
ifmgd->probe_send_count++;
+ if (dst) {
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get(sdata, dst);
+ if (!WARN_ON(!sta))
+ ieee80211_check_fast_rx(sta);
+ mutex_unlock(&sdata->local->sta_mtx);
+ }
+
if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
ifmgd->nullfunc_failed = false;
ieee80211_send_nullfunc(sdata->local, sdata, false);
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 0be0aadfc559..88e6ebbbe24f 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -75,8 +75,6 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
if (!sta)
return;
- sta->rx_stats.last_rx = jiffies;
-
/* Add only mandatory rates for now */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] =
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 624fe5b81615..8d3260785b94 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -96,9 +96,9 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_DEBUGFS
struct rate_control_ref *ref = sta->rate_ctrl;
- if (ref && sta->debugfs.dir && ref->ops->add_sta_debugfs)
+ if (ref && sta->debugfs_dir && ref->ops->add_sta_debugfs)
ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv,
- sta->debugfs.dir);
+ sta->debugfs_dir);
#endif
}
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 370d677b547b..d77a9a842338 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -883,6 +883,59 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
ratetbl->rate[offset].flags = flags;
}
+static inline int
+minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
+{
+ int group = rate / MCS_GROUP_RATES;
+ rate %= MCS_GROUP_RATES;
+ return mi->groups[group].rates[rate].prob_ewma;
+}
+
+static int
+minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
+{
+ int group = mi->max_prob_rate / MCS_GROUP_RATES;
+ const struct mcs_group *g = &minstrel_mcs_groups[group];
+ int rate = mi->max_prob_rate % MCS_GROUP_RATES;
+
+ /* Disable A-MSDU if max_prob_rate is bad */
+ if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
+ return 1;
+
+ /* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
+ if (g->duration[rate] > MCS_DURATION(1, 0, 52))
+ return 500;
+
+ /*
+ * If the rate is slower than single-stream MCS4, limit A-MSDU to the
+ * usual data packet size
+ */
+ if (g->duration[rate] > MCS_DURATION(1, 0, 104))
+ return 1600;
+
+ /*
+ * If the rate is slower than single-stream MCS7, or if the max throughput
+ * rate success probability is less than 75%, limit A-MSDU to twice the usual
+ * data packet size
+ */
+ if (g->duration[rate] > MCS_DURATION(1, 0, 260) ||
+ (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
+ MINSTREL_FRAC(75, 100)))
+ return 3200;
+
+ /*
+ * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes.
+ * Since aggregation sessions are started/stopped without txq flush, use
+ * the limit here to avoid the complexity of having to de-aggregate
+ * packets in the queue.
+ */
+ if (!mi->sta->vht_cap.vht_supported)
+ return IEEE80211_MAX_MPDU_LEN_HT_BA;
+
+ /* unlimited */
+ return 0;
+}
+
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
@@ -907,6 +960,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
}
+ mi->sta->max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi);
rates->rate[i].idx = -1;
rate_control_set_rates(mp->hw, mi->sta, rates);
}
@@ -924,6 +978,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
struct minstrel_rate_stats *mrs;
struct minstrel_mcs_group_data *mg;
unsigned int sample_dur, sample_group, cur_max_tp_streams;
+ int tp_rate1, tp_rate2;
int sample_idx = 0;
if (mi->sample_wait > 0) {
@@ -945,14 +1000,22 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mrs = &mg->rates[sample_idx];
sample_idx += sample_group * MCS_GROUP_RATES;
+ /* Set tp_rate1, tp_rate2 to the highest / second highest max_tp_rate */
+ if (minstrel_get_duration(mi->max_tp_rate[0]) >
+ minstrel_get_duration(mi->max_tp_rate[1])) {
+ tp_rate1 = mi->max_tp_rate[1];
+ tp_rate2 = mi->max_tp_rate[0];
+ } else {
+ tp_rate1 = mi->max_tp_rate[0];
+ tp_rate2 = mi->max_tp_rate[1];
+ }
+
/*
* Sampling might add some overhead (RTS, no aggregation)
- * to the frame. Hence, don't use sampling for the currently
- * used rates.
+ * to the frame. Hence, don't use sampling for the currently used
+ * highest throughput or probability rate.
*/
- if (sample_idx == mi->max_tp_rate[0] ||
- sample_idx == mi->max_tp_rate[1] ||
- sample_idx == mi->max_prob_rate)
+ if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate)
return -1;
/*
@@ -967,10 +1030,10 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
* if the link is working perfectly.
*/
- cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
+ cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 /
MCS_GROUP_RATES].streams;
sample_dur = minstrel_get_duration(sample_idx);
- if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) &&
+ if (sample_dur >= minstrel_get_duration(tp_rate2) &&
(cur_max_tp_streams - 1 <
minstrel_mcs_groups[sample_group].streams ||
sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
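
minstrel_ht_get_max_amsdu_len() above picks the A-MSDU limit by comparing the max_prob_rate's duration and success probability against fixed-point fractions such as MINSTREL_FRAC(50, 100). The sketch below only shows how such a fraction is a percentage scaled into an integer; the 12-bit scale used here is an assumption about minstrel's internal MINSTREL_SCALE, the comparison logic is the point.

#include <stdio.h>

/* assumed scale; in mac80211 this comes from rc80211_minstrel.h */
#define MINSTREL_SCALE		12
#define MINSTREL_FRAC(val, div)	(((val) << MINSTREL_SCALE) / (div))

int main(void)
{
	unsigned int prob_ewma = MINSTREL_FRAC(63, 100);	/* 63% observed */

	printf("threshold (50%%) = %u\n", (unsigned int)MINSTREL_FRAC(50, 100));
	printf("prob_ewma        = %u\n", prob_ewma);
	printf("A-MSDU allowed   = %s\n",
	       prob_ewma < MINSTREL_FRAC(50, 100) ? "no (limit to 1)" : "yes");
	return 0;
}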
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index dc27becb9b71..c2b659e9a9f9 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -722,8 +722,8 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
return -1;
}
-static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
- struct sk_buff *skb)
+static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc;
@@ -1421,16 +1421,9 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
sta->rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control) &&
- !is_multicast_ether_addr(hdr->addr1)) {
- sta->rx_stats.last_rate_idx =
- status->rate_idx;
- sta->rx_stats.last_rate_flag =
- status->flag;
- sta->rx_stats.last_rate_vht_flag =
- status->vht_flag;
- sta->rx_stats.last_rate_vht_nss =
- status->vht_nss;
- }
+ !is_multicast_ether_addr(hdr->addr1))
+ sta->rx_stats.last_rate =
+ sta_stats_encode_rate(status);
}
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
sta->rx_stats.last_rx = jiffies;
@@ -1440,22 +1433,22 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* match the current local configuration when processed.
*/
sta->rx_stats.last_rx = jiffies;
- if (ieee80211_is_data(hdr->frame_control)) {
- sta->rx_stats.last_rate_idx = status->rate_idx;
- sta->rx_stats.last_rate_flag = status->flag;
- sta->rx_stats.last_rate_vht_flag = status->vht_flag;
- sta->rx_stats.last_rate_vht_nss = status->vht_nss;
- }
+ if (ieee80211_is_data(hdr->frame_control))
+ sta->rx_stats.last_rate = sta_stats_encode_rate(status);
}
if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_notify(rx->sdata, hdr);
sta->rx_stats.fragments++;
+
+ u64_stats_update_begin(&rx->sta->rx_stats.syncp);
sta->rx_stats.bytes += rx->skb->len;
+ u64_stats_update_end(&rx->sta->rx_stats.syncp);
+
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
sta->rx_stats.last_signal = status->signal;
- ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal);
+ ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
}
if (status->chains) {
@@ -1467,7 +1460,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
continue;
sta->rx_stats.chain_signal_last[i] = signal;
- ewma_signal_add(&sta->rx_stats.chain_signal_avg[i],
+ ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
-signal);
}
}
@@ -1586,7 +1579,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
cs = rx->sta->cipher_scheme;
- keyid = iwl80211_get_cs_keyid(cs, rx->skb);
+ keyid = ieee80211_get_cs_keyid(cs, rx->skb);
if (unlikely(keyid < 0))
return RX_DROP_UNUSABLE;
}
@@ -1670,7 +1663,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
hdrlen = ieee80211_hdrlen(fc);
if (cs) {
- keyidx = iwl80211_get_cs_keyid(cs, rx->skb);
+ keyidx = ieee80211_get_cs_keyid(cs, rx->skb);
if (unlikely(keyidx < 0))
return RX_DROP_UNUSABLE;
@@ -2129,6 +2122,17 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
ieee80211_rx_stats(dev, skb->len);
+ if (rx->sta) {
+ /* The seqno index has the same property as needed
+ * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
+ * for non-QoS-data frames. Here we know it's a data
+ * frame, so count MSDUs.
+ */
+ u64_stats_update_begin(&rx->sta->rx_stats.syncp);
+ rx->sta->rx_stats.msdu[rx->seqno_idx]++;
+ u64_stats_update_end(&rx->sta->rx_stats.syncp);
+ }
+
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
@@ -2415,15 +2419,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
return RX_DROP_MONITOR;
- if (rx->sta) {
- /* The seqno index has the same property as needed
- * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
- * for non-QoS-data frames. Here we know it's a data
- * frame, so count MSDUs.
- */
- rx->sta->rx_stats.msdu[rx->seqno_idx]++;
- }
-
/*
* Send unexpected-4addr-frame event to hostapd. For older versions,
* also drop the frame to cooked monitor interfaces.
@@ -2474,14 +2469,14 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
rx->skb->dev = dev;
- if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
+ local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
!is_multicast_ether_addr(
((struct ethhdr *)rx->skb->data)->h_dest) &&
(!local->scanning &&
- !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
- }
+ !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
ieee80211_deliver_skb(rx);
@@ -3201,7 +3196,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
res = rxh(rx); \
if (res != RX_CONTINUE) \
goto rxh_next; \
- } while (0);
+ } while (0)
/* Lock here to avoid hitting all of the data used in the RX
* path (e.g. key data, station data, ...) concurrently when
@@ -3219,30 +3214,30 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
*/
rx->skb = skb;
- CALL_RXH(ieee80211_rx_h_check_more_data)
- CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
- CALL_RXH(ieee80211_rx_h_sta_process)
- CALL_RXH(ieee80211_rx_h_decrypt)
- CALL_RXH(ieee80211_rx_h_defragment)
- CALL_RXH(ieee80211_rx_h_michael_mic_verify)
+ CALL_RXH(ieee80211_rx_h_check_more_data);
+ CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
+ CALL_RXH(ieee80211_rx_h_sta_process);
+ CALL_RXH(ieee80211_rx_h_decrypt);
+ CALL_RXH(ieee80211_rx_h_defragment);
+ CALL_RXH(ieee80211_rx_h_michael_mic_verify);
/* must be after MMIC verify so header is counted in MPDU mic */
#ifdef CONFIG_MAC80211_MESH
if (ieee80211_vif_is_mesh(&rx->sdata->vif))
CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
- CALL_RXH(ieee80211_rx_h_amsdu)
- CALL_RXH(ieee80211_rx_h_data)
+ CALL_RXH(ieee80211_rx_h_amsdu);
+ CALL_RXH(ieee80211_rx_h_data);
/* special treatment -- needs the queue */
res = ieee80211_rx_h_ctrl(rx, frames);
if (res != RX_CONTINUE)
goto rxh_next;
- CALL_RXH(ieee80211_rx_h_mgmt_check)
- CALL_RXH(ieee80211_rx_h_action)
- CALL_RXH(ieee80211_rx_h_userspace_mgmt)
- CALL_RXH(ieee80211_rx_h_action_return)
- CALL_RXH(ieee80211_rx_h_mgmt)
+ CALL_RXH(ieee80211_rx_h_mgmt_check);
+ CALL_RXH(ieee80211_rx_h_action);
+ CALL_RXH(ieee80211_rx_h_userspace_mgmt);
+ CALL_RXH(ieee80211_rx_h_action_return);
+ CALL_RXH(ieee80211_rx_h_mgmt);
rxh_next:
ieee80211_rx_handlers_result(rx, res);
@@ -3265,10 +3260,10 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
res = rxh(rx); \
if (res != RX_CONTINUE) \
goto rxh_next; \
- } while (0);
+ } while (0)
- CALL_RXH(ieee80211_rx_h_check_dup)
- CALL_RXH(ieee80211_rx_h_check)
+ CALL_RXH(ieee80211_rx_h_check_dup);
+ CALL_RXH(ieee80211_rx_h_check);
ieee80211_rx_reorder_ampdu(rx, &reorder_release);
@@ -3513,6 +3508,351 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
return false;
}
+void ieee80211_check_fast_rx(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_key *key;
+ struct ieee80211_fast_rx fastrx = {
+ .dev = sdata->dev,
+ .vif_type = sdata->vif.type,
+ .control_port_protocol = sdata->control_port_protocol,
+ }, *old, *new = NULL;
+ bool assign = false;
+
+ /* use sparse to check that we don't return without updating */
+ __acquire(check_fast_rx);
+
+ BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
+ BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
+ ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
+ ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
+
+ fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
+
+ /* fast-rx doesn't do reordering */
+ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
+ !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
+ goto clear;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ /* 4-addr is harder to deal with, later maybe */
+ if (sdata->u.mgd.use_4addr)
+ goto clear;
+ /* software powersave is a huge mess, avoid all of it */
+ if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
+ goto clear;
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
+ !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
+ goto clear;
+ if (sta->sta.tdls) {
+ fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
+ fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+ fastrx.expected_ds_bits = 0;
+ } else {
+ fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
+ fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
+ fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
+ fastrx.expected_ds_bits =
+ cpu_to_le16(IEEE80211_FCTL_FROMDS);
+ }
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_AP:
+ /* parallel-rx requires this, at least with calls to
+ * ieee80211_sta_ps_transition()
+ */
+ if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
+ goto clear;
+ fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
+ fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+ fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
+
+ fastrx.internal_forward =
+ !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
+ (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
+ !sdata->u.vlan.sta);
+ break;
+ default:
+ goto clear;
+ }
+
+ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ goto clear;
+
+ rcu_read_lock();
+ key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+ if (key) {
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* we don't want to deal with MMIC in fast-rx */
+ goto clear_rcu;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ break;
+ default:
+ /* we also don't want to deal with WEP or cipher scheme
+ * since those require looking up the key idx in the
+ * frame, rather than assuming the PTK is used
+ * (we need to revisit this once we implement the real
+ * PTK index, which is now valid in the spec, but we
+ * haven't implemented that part yet)
+ */
+ goto clear_rcu;
+ }
+
+ fastrx.key = true;
+ fastrx.icv_len = key->conf.icv_len;
+ }
+
+ assign = true;
+ clear_rcu:
+ rcu_read_unlock();
+ clear:
+ __release(check_fast_rx);
+
+ if (assign)
+ new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
+
+ spin_lock_bh(&sta->lock);
+ old = rcu_dereference_protected(sta->fast_rx, true);
+ rcu_assign_pointer(sta->fast_rx, new);
+ spin_unlock_bh(&sta->lock);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+}
+
+void ieee80211_clear_fast_rx(struct sta_info *sta)
+{
+ struct ieee80211_fast_rx *old;
+
+ spin_lock_bh(&sta->lock);
+ old = rcu_dereference_protected(sta->fast_rx, true);
+ RCU_INIT_POINTER(sta->fast_rx, NULL);
+ spin_unlock_bh(&sta->lock);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+}
+
+void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ lockdep_assert_held(&local->sta_mtx);
+
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata &&
+ (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
+ continue;
+ ieee80211_check_fast_rx(sta);
+ }
+}
+
+void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ mutex_lock(&local->sta_mtx);
+ __ieee80211_check_fast_rx_iface(sdata);
+ mutex_unlock(&local->sta_mtx);
+}
+
+static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+ struct ieee80211_fast_rx *fast_rx)
+{
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct sta_info *sta = rx->sta;
+ int orig_len = skb->len;
+ int snap_offs = ieee80211_hdrlen(hdr->frame_control);
+ struct {
+ u8 snap[sizeof(rfc1042_header)];
+ __be16 proto;
+ } *payload __aligned(2);
+ struct {
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ } addrs __aligned(2);
+ struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+
+ if (fast_rx->uses_rss)
+ stats = this_cpu_ptr(sta->pcpu_rx_stats);
+
+ /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
+ * to a common data structure; drivers can implement that per queue
+ * but we don't have that information in mac80211
+ */
+ if (!(status->flag & RX_FLAG_DUP_VALIDATED))
+ return false;
+
+#define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
+
+ /* If using encryption, we also need to have:
+ * - PN_VALIDATED: similar, but the implementation is tricky
+ * - DECRYPTED: necessary for PN_VALIDATED
+ */
+ if (fast_rx->key &&
+ (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
+ return false;
+
+ /* we don't deal with A-MSDU deaggregation here */
+ if (status->rx_flags & IEEE80211_RX_AMSDU)
+ return false;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return false;
+
+ if (unlikely(ieee80211_is_frag(hdr)))
+ return false;
+
+ /* Since our interface address cannot be multicast, this
+ * implicitly also rejects multicast frames without the
+ * explicit check.
+ *
+ * We shouldn't get any *data* frames not addressed to us
+ * (AP mode will accept multicast *management* frames), but
+ * punting here will make it go through the full checks in
+ * ieee80211_accept_frame().
+ */
+ if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
+ return false;
+
+ if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
+ IEEE80211_FCTL_TODS)) !=
+ fast_rx->expected_ds_bits)
+ goto drop;
+
+ /* assign the key to drop unencrypted frames (later)
+ * and strip the IV/MIC if necessary
+ */
+ if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
+ /* GCMP header length is the same */
+ snap_offs += IEEE80211_CCMP_HDR_LEN;
+ }
+
+ if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
+ goto drop;
+ payload = (void *)(skb->data + snap_offs);
+
+ if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
+ return false;
+
+ /* Don't handle these here since they require special code.
+ * Accept AARP and IPX even though they should come with a
+ * bridge-tunnel header - but if we get them this way then
+ * there's little point in discarding them.
+ */
+ if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
+ payload->proto == fast_rx->control_port_protocol))
+ return false;
+
+ /* after this point, don't punt to the slowpath! */
+
+ if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
+ pskb_trim(skb, skb->len - fast_rx->icv_len))
+ goto drop;
+
+ if (unlikely(fast_rx->sta_notify)) {
+ ieee80211_sta_rx_notify(rx->sdata, hdr);
+ fast_rx->sta_notify = false;
+ }
+
+ /* statistics part of ieee80211_rx_h_sta_process() */
+ stats->last_rx = jiffies;
+ stats->last_rate = sta_stats_encode_rate(status);
+
+ stats->fragments++;
+
+ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
+ stats->last_signal = status->signal;
+ if (!fast_rx->uses_rss)
+ ewma_signal_add(&sta->rx_stats_avg.signal,
+ -status->signal);
+ }
+
+ if (status->chains) {
+ int i;
+
+ stats->chains = status->chains;
+ for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
+ int signal = status->chain_signal[i];
+
+ if (!(status->chains & BIT(i)))
+ continue;
+
+ stats->chain_signal_last[i] = signal;
+ if (!fast_rx->uses_rss)
+ ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
+ -signal);
+ }
+ }
+ /* end of statistics */
+
+ if (rx->key && !ieee80211_has_protected(hdr->frame_control))
+ goto drop;
+
+ /* do the header conversion - first grab the addresses */
+ ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
+ ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
+ /* remove the SNAP but leave the ethertype */
+ skb_pull(skb, snap_offs + sizeof(rfc1042_header));
+ /* push the addresses in front */
+ memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
+
+ skb->dev = fast_rx->dev;
+
+ ieee80211_rx_stats(fast_rx->dev, skb->len);
+
+ /* The seqno index has the same property as needed
+ * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
+ * for non-QoS-data frames. Here we know it's a data
+ * frame, so count MSDUs.
+ */
+ u64_stats_update_begin(&stats->syncp);
+ stats->msdu[rx->seqno_idx]++;
+ stats->bytes += orig_len;
+ u64_stats_update_end(&stats->syncp);
+
+ if (fast_rx->internal_forward) {
+ struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
+
+ if (dsta) {
+ /*
+ * Send to wireless media and increase priority by 256
+ * to keep the received priority instead of
+ * reclassifying the frame (see cfg80211_classify8021d).
+ */
+ skb->priority += 256;
+ skb->protocol = htons(ETH_P_802_3);
+ skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+ dev_queue_xmit(skb);
+ return true;
+ }
+ }
+
+ /* deliver to local stack */
+ skb->protocol = eth_type_trans(skb, fast_rx->dev);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ if (rx->napi)
+ napi_gro_receive(rx->napi, skb);
+ else
+ netif_receive_skb(skb);
+
+ return true;
+ drop:
+ dev_kfree_skb(skb);
+ stats->dropped++;
+ return true;
+}
+
/*
* This function returns whether or not the SKB
* was destined for RX processing or not, which,
@@ -3527,6 +3867,21 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
rx->skb = skb;
+ /* See if we can do fast-rx; if we have to copy we already lost,
+ * so punt in that case. We should never have to deliver a data
+ * frame to multiple interfaces anyway.
+ *
+ * We skip the ieee80211_accept_frame() call and do the necessary
+ * checking inside ieee80211_invoke_fast_rx().
+ */
+ if (consume && rx->sta) {
+ struct ieee80211_fast_rx *fast_rx;
+
+ fast_rx = rcu_dereference(rx->sta->fast_rx);
+ if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
+ return true;
+ }
+
if (!ieee80211_accept_frame(rx))
return false;
@@ -3552,6 +3907,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
* be called with rcu_read_lock protection.
*/
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
struct sk_buff *skb,
struct napi_struct *napi)
{
@@ -3561,7 +3917,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
__le16 fc;
struct ieee80211_rx_data rx;
struct ieee80211_sub_if_data *prev;
- struct sta_info *sta, *prev_sta;
struct rhash_head *tmp;
int err = 0;
@@ -3597,7 +3952,14 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_is_beacon(hdr->frame_control)))
ieee80211_scan_rx(local, skb);
- if (ieee80211_is_data(fc)) {
+ if (pubsta) {
+ rx.sta = container_of(pubsta, struct sta_info, sta);
+ rx.sdata = rx.sta->sdata;
+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+ return;
+ goto out;
+ } else if (ieee80211_is_data(fc)) {
+ struct sta_info *sta, *prev_sta;
const struct bucket_table *tbl;
prev_sta = NULL;
@@ -3671,8 +4033,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
* This is the receive path handler. It is called by a low level driver when an
* 802.11 MPDU is received from the hardware.
*/
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct napi_struct *napi)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
+ struct sk_buff *skb, struct napi_struct *napi)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_rate *rate = NULL;
@@ -3771,7 +4133,8 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
ieee80211_tpt_led_trig_rx(local,
((struct ieee80211_hdr *)skb->data)->frame_control,
skb->len);
- __ieee80211_rx_handle_packet(hw, skb, napi);
+
+ __ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
rcu_read_unlock();
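
The core of the new fast-rx path above is a cheap 802.11-to-Ethernet conversion: the DA/SA offsets computed in ieee80211_check_fast_rx() are copied out, the 802.11 header plus the RFC 1042 SNAP is stripped while keeping the EtherType, and the two addresses are pushed back in front. Below is a hedged userspace sketch of that conversion for the plain station case (FromDS, non-QoS, no crypto header); the fixed offsets and helper name are illustrative, not mac80211 API.

#include <stdio.h>
#include <string.h>

#define HDRLEN		24	/* non-QoS 802.11 data header */
#define SNAPLEN		6	/* RFC 1042 LLC/SNAP, EtherType follows */
#define DA_OFFS		4	/* addr1 for a station, FromDS */
#define SA_OFFS		16	/* addr3 for a station, FromDS */

/* Converts in place; returns the new frame length, 0 if too short. */
static size_t dot11_to_ethernet(unsigned char *frame, size_t len)
{
	unsigned char da[6], sa[6];
	size_t pull = HDRLEN + SNAPLEN;	/* keep the 2-byte EtherType */

	if (len < pull + 2)
		return 0;

	memcpy(da, frame + DA_OFFS, 6);
	memcpy(sa, frame + SA_OFFS, 6);

	/* drop 802.11 header + SNAP, then prepend DA and SA */
	memmove(frame + 12, frame + pull, len - pull);
	memcpy(frame, da, 6);
	memcpy(frame + 6, sa, 6);

	return len - pull + 12;
}

int main(void)
{
	/* 24-byte header + SNAP + EtherType 0x0800 + 4 payload bytes */
	unsigned char frame[24 + 6 + 2 + 4] = {
		[4]  = 0x02, 0, 0, 0, 0, 0x01,		/* addr1 = DA  */
		[10] = 0x02, 0, 0, 0, 0, 0x02,		/* addr2 = BSS */
		[16] = 0x02, 0, 0, 0, 0, 0x03,		/* addr3 = SA  */
		[24] = 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00,
		[30] = 0x08, 0x00, 0xde, 0xad, 0xbe, 0xef,
	};
	size_t n = dot11_to_ethernet(frame, sizeof(frame));

	printf("ethernet frame length: %zu (EtherType %02x%02x)\n",
	       n, frame[12], frame[13]);
	return 0;
}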
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index ae980ce8daff..41aa728e5468 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -66,7 +66,9 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen, srlen;
- struct cfg80211_inform_bss bss_meta = {};
+ struct cfg80211_inform_bss bss_meta = {
+ .boottime_ns = rx_status->boottime_ns,
+ };
bool signal_valid;
if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
@@ -303,6 +305,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
ether_addr_copy(local->hw_scan_req->req.mac_addr_mask,
req->mac_addr_mask);
+ ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid);
return true;
}
@@ -497,7 +500,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
for (i = 0; i < scan_req->n_ssids; i++)
ieee80211_send_probe_req(
- sdata, local->scan_addr, NULL,
+ sdata, local->scan_addr, scan_req->bssid,
scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len,
scan_req->ie, scan_req->ie_len,
scan_req->rates[band], false,
@@ -562,6 +565,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
req->n_channels * sizeof(req->channels[0]);
local->hw_scan_req->req.ie = ies;
local->hw_scan_req->req.flags = req->flags;
+ eth_broadcast_addr(local->hw_scan_req->req.bssid);
local->hw_scan_band = 0;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d20bab5c146c..5ccfdbd406bd 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2,7 +2,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -67,6 +67,7 @@
static const struct rhashtable_params sta_rht_params = {
.nelem_hint = 3, /* start small */
+ .insecure_elasticity = true, /* Disable chain-length checks. */
.automatic_shrinking = true,
.head_offset = offsetof(struct sta_info, hash_node),
.key_offset = offsetof(struct sta_info, addr),
@@ -254,15 +255,16 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
#ifdef CONFIG_MAC80211_MESH
kfree(sta->mesh);
#endif
+ free_percpu(sta->pcpu_rx_stats);
kfree(sta);
}
/* Caller must hold local->sta_mtx */
-static void sta_info_hash_add(struct ieee80211_local *local,
- struct sta_info *sta)
+static int sta_info_hash_add(struct ieee80211_local *local,
+ struct sta_info *sta)
{
- rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
- sta_rht_params);
+ return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -311,6 +313,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
+ if (ieee80211_hw_check(hw, USES_RSS)) {
+ sta->pcpu_rx_stats =
+ alloc_percpu(struct ieee80211_sta_rx_stats);
+ if (!sta->pcpu_rx_stats)
+ goto free;
+ }
+
spin_lock_init(&sta->lock);
spin_lock_init(&sta->ps_lock);
INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
@@ -335,15 +344,17 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sdata = sdata;
sta->rx_stats.last_rx = jiffies;
+ u64_stats_init(&sta->rx_stats.syncp);
+
sta->sta_state = IEEE80211_STA_NONE;
/* Mark TID as unreserved */
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
sta->last_connected = ktime_get_seconds();
- ewma_signal_init(&sta->rx_stats.avg_signal);
- for (i = 0; i < ARRAY_SIZE(sta->rx_stats.chain_signal_avg); i++)
- ewma_signal_init(&sta->rx_stats.chain_signal_avg[i]);
+ ewma_signal_init(&sta->rx_stats_avg.signal);
+ for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
+ ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);
if (local->ops->wake_tx_queue) {
void *txq_data;
@@ -406,6 +417,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
}
}
+ sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
+
sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
return sta;
@@ -524,7 +537,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
/* make the station visible */
- sta_info_hash_add(local, sta);
+ err = sta_info_hash_add(local, sta);
+ if (err)
+ goto out_drop_sta;
list_add_tail_rcu(&sta->list, &local->sta_list);
@@ -557,6 +572,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
out_remove:
sta_info_hash_del(local, sta);
list_del_rcu(&sta->list);
+ out_drop_sta:
local->num_sta--;
synchronize_net();
__cleanup_single_sta(sta);
@@ -875,6 +891,13 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
+ /*
+ * Before removing the station from the driver there might be pending
+ * rx frames on RSS queues sent prior to the disassociation - wait for
+ * all such frames to be processed.
+ */
+ drv_sync_rx_queues(local, sta);
+
ret = sta_info_hash_del(local, sta);
if (WARN_ON(ret))
return ret;
@@ -1087,10 +1110,12 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->sta_mtx);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+ unsigned long last_active = ieee80211_sta_last_active(sta);
+
if (sdata != sta->sdata)
continue;
- if (time_after(jiffies, sta->rx_stats.last_rx + exp_time)) {
+ if (time_is_before_jiffies(last_active + exp_time)) {
sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
sta->sta.addr);
@@ -1760,6 +1785,31 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);
+static void
+ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ bool allow_p2p_go_ps = sdata->vif.p2p;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata ||
+ !test_sta_flag(sta, WLAN_STA_ASSOC))
+ continue;
+ if (!sta->sta.support_p2p_ps) {
+ allow_p2p_go_ps = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
+ sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
+ }
+}
+
int sta_info_move_state(struct sta_info *sta,
enum ieee80211_sta_state new_state)
{
@@ -1821,12 +1871,16 @@ int sta_info_move_state(struct sta_info *sta,
} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
clear_bit(WLAN_STA_ASSOC, &sta->_flags);
ieee80211_recalc_min_chandef(sta->sdata);
+ if (!sta->sta.support_p2p_ps)
+ ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
}
break;
case IEEE80211_STA_ASSOC:
if (sta->sta_state == IEEE80211_STA_AUTH) {
set_bit(WLAN_STA_ASSOC, &sta->_flags);
ieee80211_recalc_min_chandef(sta->sdata);
+ if (!sta->sta.support_p2p_ps)
+ ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
(sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
@@ -1834,6 +1888,7 @@ int sta_info_move_state(struct sta_info *sta,
atomic_dec(&sta->sdata->bss->num_mcast_sta);
clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
ieee80211_clear_fast_xmit(sta);
+ ieee80211_clear_fast_rx(sta);
}
break;
case IEEE80211_STA_AUTHORIZED:
@@ -1844,6 +1899,7 @@ int sta_info_move_state(struct sta_info *sta,
atomic_inc(&sta->sdata->bss->num_mcast_sta);
set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
ieee80211_check_fast_xmit(sta);
+ ieee80211_check_fast_rx(sta);
}
break;
default:
@@ -1890,43 +1946,117 @@ u8 sta_info_tx_streams(struct sta_info *sta)
>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
}
-static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+static struct ieee80211_sta_rx_stats *
+sta_get_last_rx_stats(struct sta_info *sta)
{
- rinfo->flags = 0;
-
- if (sta->rx_stats.last_rate_flag & RX_FLAG_HT) {
- rinfo->flags |= RATE_INFO_FLAGS_MCS;
- rinfo->mcs = sta->rx_stats.last_rate_idx;
- } else if (sta->rx_stats.last_rate_flag & RX_FLAG_VHT) {
- rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
- rinfo->nss = sta->rx_stats.last_rate_vht_nss;
- rinfo->mcs = sta->rx_stats.last_rate_idx;
- } else {
+ struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+ struct ieee80211_local *local = sta->local;
+ int cpu;
+
+ if (!ieee80211_hw_check(&local->hw, USES_RSS))
+ return stats;
+
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpustats;
+
+ cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+
+ if (time_after(cpustats->last_rx, stats->last_rx))
+ stats = cpustats;
+ }
+
+ return stats;
+}
+
+static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
+ struct rate_info *rinfo)
+{
+ rinfo->bw = (rate & STA_STATS_RATE_BW_MASK) >>
+ STA_STATS_RATE_BW_SHIFT;
+
+ if (rate & STA_STATS_RATE_VHT) {
+ rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
+ rinfo->mcs = rate & 0xf;
+ rinfo->nss = (rate & 0xf0) >> 4;
+ } else if (rate & STA_STATS_RATE_HT) {
+ rinfo->flags = RATE_INFO_FLAGS_MCS;
+ rinfo->mcs = rate & 0xff;
+ } else if (rate & STA_STATS_RATE_LEGACY) {
struct ieee80211_supported_band *sband;
- int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
u16 brate;
-
- sband = sta->local->hw.wiphy->bands[
- ieee80211_get_sdata_band(sta->sdata)];
- brate = sband->bitrates[sta->rx_stats.last_rate_idx].bitrate;
+ unsigned int shift;
+
+ sband = local->hw.wiphy->bands[(rate >> 4) & 0xf];
+ brate = sband->bitrates[rate & 0xf].bitrate;
+ if (rinfo->bw == RATE_INFO_BW_5)
+ shift = 2;
+ else if (rinfo->bw == RATE_INFO_BW_10)
+ shift = 1;
+ else
+ shift = 0;
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
- if (sta->rx_stats.last_rate_flag & RX_FLAG_SHORT_GI)
+ if (rate & STA_STATS_RATE_SGI)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+}
+
+static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+{
+ u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate);
- if (sta->rx_stats.last_rate_flag & RX_FLAG_5MHZ)
- rinfo->bw = RATE_INFO_BW_5;
- else if (sta->rx_stats.last_rate_flag & RX_FLAG_10MHZ)
- rinfo->bw = RATE_INFO_BW_10;
- else if (sta->rx_stats.last_rate_flag & RX_FLAG_40MHZ)
- rinfo->bw = RATE_INFO_BW_40;
- else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_80MHZ)
- rinfo->bw = RATE_INFO_BW_80;
- else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_160MHZ)
- rinfo->bw = RATE_INFO_BW_160;
+ if (rate == STA_STATS_RATE_INVALID)
+ rinfo->flags = 0;
else
- rinfo->bw = RATE_INFO_BW_20;
+ sta_stats_decode_rate(sta->local, rate, rinfo);
+}
+
+static void sta_set_tidstats(struct sta_info *sta,
+ struct cfg80211_tid_stats *tidstats,
+ int tid)
+{
+ struct ieee80211_local *local = sta->local;
+
+ if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&sta->rx_stats.syncp);
+ tidstats->rx_msdu = sta->rx_stats.msdu[tid];
+ } while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start));
+
+ tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
+ }
+
+ if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
+ tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
+ tidstats->tx_msdu = sta->tx_stats.msdu[tid];
+ }
+
+ if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
+ tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
+ tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
+ }
+
+ if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
+ tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
+ tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
+ }
+}
+
+static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
+{
+ unsigned int start;
+ u64 value;
+
+ do {
+ start = u64_stats_fetch_begin(&rxstats->syncp);
+ value = rxstats->bytes;
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+
+ return value;
}
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
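The helpers above (sta_get_stats_bytes() and the per-TID RX MSDU read in sta_set_tidstats()) only exercise the reader side of the u64_stats_sync protocol. For orientation, a minimal sketch of what the matching writer side looks like; the actual update lives in the RX path (rx.c), which is not part of this hunk, so the helper name and callsite here are illustrative assumptions only:

#include <linux/u64_stats_sync.h>

/* illustrative sketch only: bump the 64-bit byte counter under the syncp so
 * that 32-bit readers spinning in sta_get_stats_bytes() never observe a
 * torn value
 */
static void sta_rx_stats_add_bytes(struct ieee80211_sta_rx_stats *stats,
                                   unsigned int len)
{
        u64_stats_update_begin(&stats->syncp);
        stats->bytes += len;
        u64_stats_update_end(&stats->syncp);
}

On 64-bit architectures the begin/end pair compiles away; on 32-bit it takes the sequence counter initialised by the u64_stats_init() call added in sta_info_alloc() above.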
@@ -1935,7 +2065,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
struct ieee80211_local *local = sdata->local;
struct rate_control_ref *ref = NULL;
u32 thr = 0;
- int i, ac;
+ int i, ac, cpu;
+ struct ieee80211_sta_rx_stats *last_rxstats;
+
+ last_rxstats = sta_get_last_rx_stats(sta);
if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
ref = local->rate_ctrl;
@@ -1964,7 +2097,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
sinfo->inactive_time =
- jiffies_to_msecs(jiffies - sta->rx_stats.last_rx);
+ jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
BIT(NL80211_STA_INFO_TX_BYTES)))) {
@@ -1983,12 +2116,30 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
BIT(NL80211_STA_INFO_RX_BYTES)))) {
- sinfo->rx_bytes = sta->rx_stats.bytes;
+ sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+
+ if (sta->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
+ }
+ }
+
sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
sinfo->rx_packets = sta->rx_stats.packets;
+ if (sta->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ sinfo->rx_packets += cpurxs->packets;
+ }
+ }
sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
}
@@ -2003,6 +2154,14 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
}
sinfo->rx_dropped_misc = sta->rx_stats.dropped;
+ if (sta->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ sinfo->rx_packets += cpurxs->dropped;
+ }
+ }
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
!(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
@@ -2014,29 +2173,36 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
- sinfo->signal = (s8)sta->rx_stats.last_signal;
+ sinfo->signal = (s8)last_rxstats->last_signal;
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
}
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
+ if (!sta->pcpu_rx_stats &&
+ !(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
sinfo->signal_avg =
- -ewma_signal_read(&sta->rx_stats.avg_signal);
+ -ewma_signal_read(&sta->rx_stats_avg.signal);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
}
}
- if (sta->rx_stats.chains &&
+ /* for the average - if pcpu_rx_stats isn't set - rxstats must point to
+ * the sta->rx_stats struct, so the check here is fine with and without
+ * pcpu statistics
+ */
+ if (last_rxstats->chains &&
!(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
- BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+ sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+ if (!sta->pcpu_rx_stats)
+ sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+
+ sinfo->chains = last_rxstats->chains;
- sinfo->chains = sta->rx_stats.chains;
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
sinfo->chain_signal[i] =
- sta->rx_stats.chain_signal_last[i];
+ last_rxstats->chain_signal_last[i];
sinfo->chain_signal_avg[i] =
- -ewma_signal_read(&sta->rx_stats.chain_signal_avg[i]);
+ -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
}
}
@@ -2055,33 +2221,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
struct cfg80211_tid_stats *tidstats = &sinfo->pertid[i];
- if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
- tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
- tidstats->rx_msdu = sta->rx_stats.msdu[i];
- }
-
- if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
- tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
- tidstats->tx_msdu = sta->tx_stats.msdu[i];
- }
-
- if (!(tidstats->filled &
- BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
- ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
- tidstats->filled |=
- BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
- tidstats->tx_msdu_retries =
- sta->status_stats.msdu_retries[i];
- }
-
- if (!(tidstats->filled &
- BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
- ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
- tidstats->filled |=
- BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
- tidstats->tx_msdu_failed =
- sta->status_stats.msdu_failed[i];
- }
+ sta_set_tidstats(sta, tidstats, i);
}
if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -2150,3 +2290,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->expected_throughput = thr;
}
}
+
+unsigned long ieee80211_sta_last_active(struct sta_info *sta)
+{
+ struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
+
+ if (time_after(stats->last_rx, sta->status_stats.last_ack))
+ return stats->last_rx;
+ return sta->status_stats.last_ack;
+}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 053f5c4fa495..c8b8ccc370eb 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -1,7 +1,7 @@
/*
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,6 +18,7 @@
#include <linux/average.h>
#include <linux/etherdevice.h>
#include <linux/rhashtable.h>
+#include <linux/u64_stats_sync.h>
#include "key.h"
/**
@@ -69,6 +70,8 @@
* @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP.
* @WLAN_STA_PS_DELIVER: station woke up, but we're still blocking TX
* until pending frames are delivered
+ *
+ * @NUM_WLAN_STA_FLAGS: number of defined flags
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH,
@@ -97,6 +100,8 @@ enum ieee80211_sta_info_flags {
WLAN_STA_MPSP_OWNER,
WLAN_STA_MPSP_RECIPIENT,
WLAN_STA_PS_DELIVER,
+
+ NUM_WLAN_STA_FLAGS,
};
#define ADDBA_RESP_INTERVAL HZ
@@ -281,6 +286,40 @@ struct ieee80211_fast_tx {
};
/**
+ * struct ieee80211_fast_rx - RX fastpath information
+ * @dev: netdevice for reporting the SKB
+ * @vif_type: (P2P-less) interface type of the original sdata (sdata->vif.type)
+ * @vif_addr: interface address
+ * @rfc1042_hdr: copy of the RFC 1042 SNAP header (to have in cache)
+ * @control_port_protocol: control port protocol copied from sdata
+ * @expected_ds_bits: from/to DS bits expected
+ * @icv_len: length of the MIC if present
+ * @key: bool indicating encryption is expected (key is set)
+ * @sta_notify: notify the MLME code (once)
+ * @internal_forward: forward frames internally on AP/VLAN type interfaces
+ * @uses_rss: copy of USES_RSS hw flag
+ * @da_offs: offset of the DA in the header (for header conversion)
+ * @sa_offs: offset of the SA in the header (for header conversion)
+ * @rcu_head: RCU head for freeing this structure
+ */
+struct ieee80211_fast_rx {
+ struct net_device *dev;
+ enum nl80211_iftype vif_type;
+ u8 vif_addr[ETH_ALEN] __aligned(2);
+ u8 rfc1042_hdr[6] __aligned(2);
+ __be16 control_port_protocol;
+ __le16 expected_ds_bits;
+ u8 icv_len;
+ u8 key:1,
+ sta_notify:1,
+ internal_forward:1,
+ uses_rss:1;
+ u8 da_offs, sa_offs;
+
+ struct rcu_head rcu_head;
+};
+
+/**
* struct mesh_sta - mesh STA information
* @plink_lock: serialize access to plink fields
* @llid: Local link ID
@@ -330,6 +369,21 @@ struct mesh_sta {
DECLARE_EWMA(signal, 1024, 8)
+struct ieee80211_sta_rx_stats {
+ unsigned long packets;
+ unsigned long last_rx;
+ unsigned long num_duplicates;
+ unsigned long fragments;
+ unsigned long dropped;
+ int last_signal;
+ u8 chains;
+ s8 chain_signal_last[IEEE80211_MAX_CHAINS];
+ u16 last_rate;
+ struct u64_stats_sync syncp;
+ u64 bytes;
+ u64 msdu[IEEE80211_NUM_TIDS + 1];
+};
+
/**
* struct sta_info - STA information
*
@@ -371,13 +425,12 @@ DECLARE_EWMA(signal, 1024, 8)
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
* @mesh: mesh STA information
- * @debugfs: debug filesystem info
+ * @debugfs_dir: debug filesystem directory dentry
* @dead: set to true when sta is unlinked
* @removed: set to true when sta is being removed from sta_list
* @uploaded: set to true when sta is uploaded to the driver
* @sta: station information we share with the driver
* @sta_state: duplicates information about station state (for debug)
- * @beacon_loss_count: number of times beacon loss has triggered
* @rcu_head: RCU head used for freeing this station struct
* @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
* taken from HT/VHT capabilities or VHT operating mode notification
@@ -386,10 +439,13 @@ DECLARE_EWMA(signal, 1024, 8)
* @cipher_scheme: optional cipher scheme for this station
* @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
* @fast_tx: TX fastpath information
+ * @fast_rx: RX fastpath information
* @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
* the BSS one.
* @tx_stats: TX statistics
* @rx_stats: RX statistics
+ * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs
+ * this (by advertising the USES_RSS hw flag)
* @status_stats: TX status statistics
*/
struct sta_info {
@@ -409,6 +465,8 @@ struct sta_info {
spinlock_t lock;
struct ieee80211_fast_tx __rcu *fast_tx;
+ struct ieee80211_fast_rx __rcu *fast_rx;
+ struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats;
#ifdef CONFIG_MAC80211_MESH
struct mesh_sta *mesh;
@@ -438,24 +496,11 @@ struct sta_info {
long last_connected;
/* Updated from RX path only, no locking requirements */
+ struct ieee80211_sta_rx_stats rx_stats;
struct {
- unsigned long packets;
- u64 bytes;
- unsigned long last_rx;
- unsigned long num_duplicates;
- unsigned long fragments;
- unsigned long dropped;
- int last_signal;
- struct ewma_signal avg_signal;
- u8 chains;
- s8 chain_signal_last[IEEE80211_MAX_CHAINS];
- struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
- int last_rate_idx;
- u32 last_rate_flag;
- u32 last_rate_vht_flag;
- u8 last_rate_vht_nss;
- u64 msdu[IEEE80211_NUM_TIDS + 1];
- } rx_stats;
+ struct ewma_signal signal;
+ struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS];
+ } rx_stats_avg;
/* Plus 1 for non-QoS frames */
__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
@@ -468,6 +513,7 @@ struct sta_info {
unsigned long last_tdls_pkt_time;
u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
+ unsigned long last_ack;
} status_stats;
/* Updated from TX path only, no locking requirements */
@@ -486,10 +532,7 @@ struct sta_info {
u8 timer_to_tid[IEEE80211_NUM_TIDS];
#ifdef CONFIG_MAC80211_DEBUGFS
- struct sta_info_debugfsdentries {
- struct dentry *dir;
- bool add_has_run;
- } debugfs;
+ struct dentry *debugfs_dir;
#endif
enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
@@ -677,4 +720,44 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
+unsigned long ieee80211_sta_last_active(struct sta_info *sta);
+
+#define STA_STATS_RATE_INVALID 0
+#define STA_STATS_RATE_VHT 0x8000
+#define STA_STATS_RATE_HT 0x4000
+#define STA_STATS_RATE_LEGACY 0x2000
+#define STA_STATS_RATE_SGI 0x1000
+#define STA_STATS_RATE_BW_SHIFT 9
+#define STA_STATS_RATE_BW_MASK (0x7 << STA_STATS_RATE_BW_SHIFT)
+
+static inline u16 sta_stats_encode_rate(struct ieee80211_rx_status *s)
+{
+ u16 r = s->rate_idx;
+
+ if (s->vht_flag & RX_VHT_FLAG_80MHZ)
+ r |= RATE_INFO_BW_80 << STA_STATS_RATE_BW_SHIFT;
+ else if (s->vht_flag & RX_VHT_FLAG_160MHZ)
+ r |= RATE_INFO_BW_160 << STA_STATS_RATE_BW_SHIFT;
+ else if (s->flag & RX_FLAG_40MHZ)
+ r |= RATE_INFO_BW_40 << STA_STATS_RATE_BW_SHIFT;
+ else if (s->flag & RX_FLAG_10MHZ)
+ r |= RATE_INFO_BW_10 << STA_STATS_RATE_BW_SHIFT;
+ else if (s->flag & RX_FLAG_5MHZ)
+ r |= RATE_INFO_BW_5 << STA_STATS_RATE_BW_SHIFT;
+ else
+ r |= RATE_INFO_BW_20 << STA_STATS_RATE_BW_SHIFT;
+
+ if (s->flag & RX_FLAG_SHORT_GI)
+ r |= STA_STATS_RATE_SGI;
+
+ if (s->flag & RX_FLAG_VHT)
+ r |= STA_STATS_RATE_VHT | (s->vht_nss << 4);
+ else if (s->flag & RX_FLAG_HT)
+ r |= STA_STATS_RATE_HT;
+ else
+ r |= STA_STATS_RATE_LEGACY | (s->band << 4);
+
+ return r;
+}
+
#endif /* STA_INFO_H */
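As a worked example of the bit layout used by sta_stats_encode_rate() and sta_stats_decode_rate(): bits 15-12 carry the VHT/HT/LEGACY/SGI flags, bits 11-9 the rate_info_bw value, and the low byte the MCS (with the NSS in bits 7-4 for VHT, or band and bitrate index for legacy). The standalone sketch below is plain userspace C, not part of the patch, and the numeric value used for RATE_INFO_BW_80 is an assumption about this tree; it packs a VHT MCS 7, 2-stream, short-GI, 80 MHz sample and unpacks it again:

#include <stdio.h>

#define STA_STATS_RATE_VHT      0x8000
#define STA_STATS_RATE_SGI      0x1000
#define STA_STATS_RATE_BW_SHIFT 9
#define STA_STATS_RATE_BW_MASK  (0x7 << STA_STATS_RATE_BW_SHIFT)

int main(void)
{
        unsigned int bw = 4;            /* assumed value of RATE_INFO_BW_80 */
        unsigned int r = 7;             /* VHT MCS 7 in the low nibble (kernel stores this as u16) */

        r |= STA_STATS_RATE_VHT | (2 << 4);     /* NSS 2 in bits 7-4 */
        r |= STA_STATS_RATE_SGI;
        r |= bw << STA_STATS_RATE_BW_SHIFT;

        printf("encoded 0x%04x -> mcs %u nss %u bw-code %u sgi %u\n",
               r, r & 0xf, (r & 0xf0) >> 4,
               (r & STA_STATS_RATE_BW_MASK) >> STA_STATS_RATE_BW_SHIFT,
               !!(r & STA_STATS_RATE_SGI));
        return 0;
}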
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8b1b2ea03eb5..c6d5c724e032 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -188,7 +188,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
- sta->rx_stats.last_rx = jiffies;
+ sta->status_stats.last_ack = jiffies;
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -647,7 +647,7 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
sta->status_stats.retry_count += retry_count;
if (acked) {
- sta->rx_stats.last_rx = jiffies;
+ sta->status_stats.last_ack = jiffies;
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index c9eeb3f12808..a29ea813b7d5 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -4,7 +4,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015 - 2016 Intel Deutschland GmbH
*
* This file is GPLv2 as found in COPYING.
*/
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
+#include "rate.h"
/* give usermode some time for retries in setting up the TDLS session */
#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
@@ -302,7 +303,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
/* IEEE802.11ac-2013 Table E-4 */
u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
struct cfg80211_chan_def uc = sta->tdls_chandef;
- enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+ enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta);
int i;
/* only support upgrading non-narrow channels up to 80 MHz */
@@ -313,7 +314,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
if (max_width > NL80211_CHAN_WIDTH_80)
max_width = NL80211_CHAN_WIDTH_80;
- if (uc.width == max_width)
+ if (uc.width >= max_width)
return;
/*
* Channel usage constraints in the IEEE802.11ac-2013 specification only
@@ -324,6 +325,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
uc.center_freq1 = centers_80mhz[i];
+ uc.center_freq2 = 0;
uc.width = NL80211_CHAN_WIDTH_80;
break;
}
@@ -332,7 +334,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
return;
/* proceed to downgrade the chandef until usable or the same */
- while (uc.width > max_width &&
+ while (uc.width > max_width ||
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype))
ieee80211_chandef_downgrade(&uc);
@@ -1242,18 +1244,44 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
+ enum nl80211_chan_width width;
+ struct ieee80211_supported_band *sband;
mutex_lock(&local->chanctx_mtx);
conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
if (conf) {
+ width = conf->def.width;
+ sband = local->hw.wiphy->bands[conf->def.chan->band];
ctx = container_of(conf, struct ieee80211_chanctx, conf);
ieee80211_recalc_chanctx_chantype(local, ctx);
+
+ /* if width changed and a peer is given, update its BW */
+ if (width != conf->def.width && sta &&
+ test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) {
+ enum ieee80211_sta_rx_bandwidth bw;
+
+ bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
+ bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+ if (bw != sta->sta.bandwidth) {
+ sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, sta,
+ IEEE80211_RC_BW_CHANGED);
+ /*
+ * if a TDLS peer BW was updated, we need to
+ * recalc the chandef width again, to get the
+ * correct chanctx min_def
+ */
+ ieee80211_recalc_chanctx_chantype(local, ctx);
+ }
+ }
+
}
mutex_unlock(&local->chanctx_mtx);
}
@@ -1350,8 +1378,6 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
break;
}
- iee80211_tdls_recalc_chanctx(sdata);
-
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
if (!sta) {
@@ -1360,6 +1386,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
break;
}
+ iee80211_tdls_recalc_chanctx(sdata, sta);
iee80211_tdls_recalc_ht_protection(sdata, sta);
set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
@@ -1390,7 +1417,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
iee80211_tdls_recalc_ht_protection(sdata, NULL);
mutex_unlock(&local->sta_mtx);
- iee80211_tdls_recalc_chanctx(sdata);
+ iee80211_tdls_recalc_chanctx(sdata, NULL);
break;
default:
ret = -ENOTSUPP;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 2b0a17ee907a..8c3b7ae103bc 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1,3 +1,8 @@
+/*
+* Portions of this file
+* Copyright(c) 2016 Intel Deutschland GmbH
+*/
+
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __MAC80211_DRIVER_TRACE
@@ -899,6 +904,13 @@ DEFINE_EVENT(sta_event, drv_sta_pre_rcu_remove,
TP_ARGS(local, sdata, sta)
);
+DEFINE_EVENT(sta_event, drv_sync_rx_queues,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+ TP_ARGS(local, sdata, sta)
+);
+
DEFINE_EVENT(sta_event, drv_sta_rate_tbl_update,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 62ad5321257d..e04d850726c5 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1116,11 +1116,15 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
reset_agg_timer = true;
} else {
queued = true;
+ if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+ clear_sta_flag(tx->sta, WLAN_STA_SP);
+ ps_dbg(tx->sta->sdata,
+ "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
+ tx->sta->sta.addr, tx->sta->sta.aid);
+ }
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
- IEEE80211_TX_CTL_NO_PS_BUFFER |
- IEEE80211_TX_STATUS_EOSP;
+ info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
__skb_queue_tail(&tid_tx->pending, skb);
if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
purge_skb = __skb_dequeue(&tid_tx->pending);
@@ -1247,7 +1251,8 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
struct txq_info *txqi;
u8 ac;
- if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)
+ if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+ (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
goto tx_normal;
if (!ieee80211_is_data(hdr->frame_control))
@@ -1324,6 +1329,10 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
out:
spin_unlock_bh(&txqi->queue.lock);
+ if (skb && skb_has_frag_list(skb) &&
+ !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
+ skb_linearize(skb);
+
return skb;
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
@@ -1691,7 +1700,9 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
bool rate_found = false;
u8 rate_retries = 0;
u16 rate_flags = 0;
- u8 mcs_known, mcs_flags;
+ u8 mcs_known, mcs_flags, mcs_bw;
+ u16 vht_known;
+ u8 vht_mcs = 0, vht_nss = 0;
int i;
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
@@ -1767,11 +1778,38 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
rate_flags |= IEEE80211_TX_RC_SHORT_GI;
+ mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
- mcs_flags & IEEE80211_RADIOTAP_MCS_BW_40)
+ mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
break;
+ case IEEE80211_RADIOTAP_VHT:
+ vht_known = get_unaligned_le16(iterator.this_arg);
+ rate_found = true;
+
+ rate_flags = IEEE80211_TX_RC_VHT_MCS;
+ if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
+ (iterator.this_arg[2] &
+ IEEE80211_RADIOTAP_VHT_FLAG_SGI))
+ rate_flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (vht_known &
+ IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
+ if (iterator.this_arg[3] == 1)
+ rate_flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ else if (iterator.this_arg[3] == 4)
+ rate_flags |=
+ IEEE80211_TX_RC_80_MHZ_WIDTH;
+ else if (iterator.this_arg[3] == 11)
+ rate_flags |=
+ IEEE80211_TX_RC_160_MHZ_WIDTH;
+ }
+
+ vht_mcs = iterator.this_arg[4] >> 4;
+ vht_nss = iterator.this_arg[4] & 0xF;
+ break;
+
/*
* Please update the file
* Documentation/networking/mac80211-injection.txt
@@ -1797,6 +1835,9 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
if (rate_flags & IEEE80211_TX_RC_MCS) {
info->control.rates[0].idx = rate;
+ } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+ ieee80211_rate_set_vht(info->control.rates, vht_mcs,
+ vht_nss);
} else {
for (i = 0; i < sband->n_bitrates; i++) {
if (rate * 5 != sband->bitrates[i].bitrate)
@@ -1807,6 +1848,9 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
}
}
+ if (info->control.rates[0].idx < 0)
+ info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;
+
info->control.rates[0].flags = rate_flags;
info->control.rates[0].count = min_t(u8, rate_retries + 1,
local->hw.max_rate_tries);
@@ -2181,7 +2225,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
}
if (mppath && mpath)
- mesh_path_del(mpath->sdata, mpath->dst);
+ mesh_path_del(sdata, mpath->dst);
}
/*
@@ -2767,6 +2811,154 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
kfree_rcu(fast_tx, rcu_head);
}
+static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
+ struct sk_buff *skb, int headroom,
+ int *subframe_len)
+{
+ int amsdu_len = *subframe_len + sizeof(struct ethhdr);
+ int padding = (4 - amsdu_len) & 3;
+
+ if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+ I802_DEBUG_INC(local->tx_expand_skb_head);
+
+ if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+ wiphy_debug(local->hw.wiphy,
+ "failed to reallocate TX buffer\n");
+ return false;
+ }
+ }
+
+ if (padding) {
+ *subframe_len += padding;
+ memset(skb_put(skb, padding), 0, padding);
+ }
+
+ return true;
+}
+
+static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_fast_tx *fast_tx,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ethhdr amsdu_hdr;
+ int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header);
+ int subframe_len = skb->len - hdr_len;
+ void *data;
+ u8 *qc;
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ return false;
+
+ if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
+ return true;
+
+ if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(amsdu_hdr),
+ &subframe_len))
+ return false;
+
+ amsdu_hdr.h_proto = cpu_to_be16(subframe_len);
+ memcpy(amsdu_hdr.h_source, skb->data + fast_tx->sa_offs, ETH_ALEN);
+ memcpy(amsdu_hdr.h_dest, skb->data + fast_tx->da_offs, ETH_ALEN);
+
+ data = skb_push(skb, sizeof(amsdu_hdr));
+ memmove(data, data + sizeof(amsdu_hdr), hdr_len);
+ memcpy(data + hdr_len, &amsdu_hdr, sizeof(amsdu_hdr));
+
+ hdr = data;
+ qc = ieee80211_get_qos_ctl(hdr);
+ *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ info->control.flags |= IEEE80211_TX_CTRL_AMSDU;
+
+ return true;
+}
+
+static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct ieee80211_fast_tx *fast_tx,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = sdata->local;
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ struct ieee80211_txq *txq = sta->sta.txq[tid];
+ struct txq_info *txqi;
+ struct sk_buff **frag_tail, *head;
+ int subframe_len = skb->len - ETH_ALEN;
+ u8 max_subframes = sta->sta.max_amsdu_subframes;
+ int max_frags = local->hw.max_tx_fragments;
+ int max_amsdu_len = sta->sta.max_amsdu_len;
+ __be16 len;
+ void *data;
+ bool ret = false;
+ int n = 1, nfrags;
+
+ if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
+ return false;
+
+ if (!txq)
+ return false;
+
+ txqi = to_txq_info(txq);
+ if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags))
+ return false;
+
+ if (sta->sta.max_rc_amsdu_len)
+ max_amsdu_len = min_t(int, max_amsdu_len,
+ sta->sta.max_rc_amsdu_len);
+
+ spin_lock_bh(&txqi->queue.lock);
+
+ head = skb_peek_tail(&txqi->queue);
+ if (!head)
+ goto out;
+
+ if (skb->len + head->len > max_amsdu_len)
+ goto out;
+
+ if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
+ goto out;
+
+ nfrags = 1 + skb_shinfo(skb)->nr_frags;
+ nfrags += 1 + skb_shinfo(head)->nr_frags;
+ frag_tail = &skb_shinfo(head)->frag_list;
+ while (*frag_tail) {
+ nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags;
+ frag_tail = &(*frag_tail)->next;
+ n++;
+ }
+
+ if (max_subframes && n > max_subframes)
+ goto out;
+
+ if (max_frags && nfrags > max_frags)
+ goto out;
+
+ if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
+ &subframe_len))
+ goto out;
+
+ ret = true;
+ data = skb_push(skb, ETH_ALEN + 2);
+ memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
+
+ data += 2 * ETH_ALEN;
+ len = cpu_to_be16(subframe_len);
+ memcpy(data, &len, 2);
+ memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
+
+ head->len += skb->len;
+ head->data_len += skb->len;
+ *frag_tail = skb;
+
+out:
+ spin_unlock_bh(&txqi->queue.lock);
+
+ return ret;
+}
+
static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct net_device *dev, struct sta_info *sta,
struct ieee80211_fast_tx *fast_tx,
@@ -2821,6 +3013,10 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
ieee80211_tx_stats(dev, skb->len + extra_head);
+ if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
+ ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
+ return true;
+
/* will not be crypto-handled beyond what we do here, so use false
* as the may-encrypt argument for the resize to not account for
* more room than we already have in 'extra_head'
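One detail in ieee80211_amsdu_realloc_pad() above is the padding computation: (4 - amsdu_len) & 3 yields exactly the number of bytes needed to round the subframe (payload plus the 14-byte ethernet-style subframe header) up to the next 4-byte boundary, as A-MSDU subframes other than the last must be 4-byte aligned. A tiny standalone check of that identity, not part of the patch:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int subframe_len;

        for (subframe_len = 0; subframe_len < 64; subframe_len++) {
                /* 14 == sizeof(struct ethhdr), the A-MSDU subframe header */
                int amsdu_len = subframe_len + 14;
                int padding = (4 - amsdu_len) & 3;

                assert(padding < 4);
                assert((amsdu_len + padding) % 4 == 0);
        }
        printf("every padded subframe length is 4-byte aligned\n");
        return 0;
}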
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 7390de4946a9..0319d6d4f863 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2724,8 +2724,9 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
rate = cfg80211_calculate_bitrate(&ri);
if (WARN_ONCE(!rate,
- "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
- status->flag, status->rate_idx, status->vht_nss))
+ "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n",
+ (unsigned long long)status->flag, status->rate_idx,
+ status->vht_nss))
return 0;
/* rewind from end of MPDU */
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 89e04d55aa18..e590e2ef9eaf 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -319,7 +319,30 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
return IEEE80211_STA_RX_BW_80;
}
-static enum ieee80211_sta_rx_bandwidth
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
+{
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+ u32 cap_width;
+
+ if (!vht_cap->vht_supported) {
+ if (!sta->sta.ht_cap.ht_supported)
+ return NL80211_CHAN_WIDTH_20_NOHT;
+
+ return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
+ }
+
+ cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+ if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
+ return NL80211_CHAN_WIDTH_160;
+ else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ return NL80211_CHAN_WIDTH_80P80;
+
+ return NL80211_CHAN_WIDTH_80;
+}
+
+enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
{
switch (width) {
@@ -347,10 +370,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
bw = ieee80211_sta_cap_rx_bw(sta);
bw = min(bw, sta->cur_max_bandwidth);
-
- /* do not cap the BW of TDLS WIDER_BW peers by the bss */
- if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
- bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
return bw;
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 18848258adde..7e4f2652bca7 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -504,18 +504,20 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
!ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
- data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
- if (!rx->sta || data_len < 0)
- return RX_DROP_UNUSABLE;
-
if (status->flag & RX_FLAG_DECRYPTED) {
if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
return RX_DROP_UNUSABLE;
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ mic_len = 0;
} else {
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
}
+ data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
+ if (!rx->sta || data_len < 0)
+ return RX_DROP_UNUSABLE;
+
if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
ccmp_hdr2pn(pn, skb->data + hdrlen);
@@ -720,8 +722,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 pn[IEEE80211_GCMP_PN_LEN];
- int data_len;
- int queue;
+ int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -729,19 +730,20 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
!ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
- data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN -
- IEEE80211_GCMP_MIC_LEN;
- if (!rx->sta || data_len < 0)
- return RX_DROP_UNUSABLE;
-
if (status->flag & RX_FLAG_DECRYPTED) {
if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
return RX_DROP_UNUSABLE;
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ mic_len = 0;
} else {
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
}
+ data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
+ if (!rx->sta || data_len < 0)
+ return RX_DROP_UNUSABLE;
+
if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
gcmp_hdr2pn(pn, skb->data + hdrlen);
@@ -772,7 +774,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
}
/* Remove GCMP header and MIC */
- if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
+ if (pskb_trim(skb, skb->len - mic_len))
return RX_DROP_UNUSABLE;
memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index b18c5ed42d95..0b80a7140cc4 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
if (!dev)
return ERR_PTR(-ENODEV);
+ if (IS_ERR(dev))
+ return dev;
+
/* The caller is holding rtnl anyways, so release the dev reference */
dev_put(dev);
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index b0bc475f641e..2e8e7e5fb4a6 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -95,7 +95,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
if (!nested)
goto nla_put_failure;
if (mtype_do_head(skb, map) ||
- nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+ nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 7e6568cad494..a748b0c2c981 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -497,6 +497,26 @@ __ip_set_put(struct ip_set *set)
write_unlock_bh(&ip_set_ref_lock);
}
+/* set->ref can be swapped out by ip_set_swap; netlink events (like dump) need
+ * a separate reference counter
+ */
+static inline void
+__ip_set_get_netlink(struct ip_set *set)
+{
+ write_lock_bh(&ip_set_ref_lock);
+ set->ref_netlink++;
+ write_unlock_bh(&ip_set_ref_lock);
+}
+
+static inline void
+__ip_set_put_netlink(struct ip_set *set)
+{
+ write_lock_bh(&ip_set_ref_lock);
+ BUG_ON(set->ref_netlink == 0);
+ set->ref_netlink--;
+ write_unlock_bh(&ip_set_ref_lock);
+}
+
/* Add, del and test set entries from kernel.
*
* The set behind the index must exist and must be referenced
@@ -1002,7 +1022,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
- if (s && s->ref) {
+ if (s && (s->ref || s->ref_netlink)) {
ret = -IPSET_ERR_BUSY;
goto out;
}
@@ -1024,7 +1044,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
if (!s) {
ret = -ENOENT;
goto out;
- } else if (s->ref) {
+ } else if (s->ref || s->ref_netlink) {
ret = -IPSET_ERR_BUSY;
goto out;
}
@@ -1171,6 +1191,9 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
from->family == to->family))
return -IPSET_ERR_TYPE_MISMATCH;
+ if (from->ref_netlink || to->ref_netlink)
+ return -EBUSY;
+
strncpy(from_name, from->name, IPSET_MAXNAMELEN);
strncpy(from->name, to->name, IPSET_MAXNAMELEN);
strncpy(to->name, from_name, IPSET_MAXNAMELEN);
@@ -1206,7 +1229,7 @@ ip_set_dump_done(struct netlink_callback *cb)
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
- __ip_set_put_byindex(inst, index);
+ __ip_set_put_netlink(set);
}
return 0;
}
@@ -1328,7 +1351,7 @@ dump_last:
if (!cb->args[IPSET_CB_ARG0]) {
/* Start listing: make sure set won't be destroyed */
pr_debug("reference set\n");
- set->ref++;
+ set->ref_netlink++;
}
write_unlock_bh(&ip_set_ref_lock);
nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
@@ -1396,7 +1419,7 @@ release_refcount:
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
- __ip_set_put_byindex(inst, index);
+ __ip_set_put_netlink(set);
cb->args[IPSET_CB_ARG0] = 0;
}
out:
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index e5336ab36d67..d32fd6b036bf 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -1082,7 +1082,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
goto nla_put_failure;
#endif
- if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+ if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 24c6c1962aea..a2a89e4e0a14 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -458,7 +458,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
if (!nested)
goto nla_put_failure;
if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
- nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+ nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + n * set->dsize)))
goto nla_put_failure;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5bebe78b9bbd..aa93877ab6e2 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -640,7 +640,12 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
/* nfnetlink_unicast will either free the nskb or add it to a socket */
err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
if (err < 0) {
- queue->queue_user_dropped++;
+ if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+ failopen = 1;
+ err = 0;
+ } else {
+ queue->queue_user_dropped++;
+ }
goto err_out_unlock;
}
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 3f9d45d3d9b7..6fa016564f90 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -192,7 +192,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
int err;
- err = rhashtable_walk_init(&priv->ht, &hti);
+ err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
iter->err = err;
if (err)
return;
@@ -248,7 +248,7 @@ static void nft_hash_gc(struct work_struct *work)
priv = container_of(work, struct nft_hash, gc_work.work);
set = nft_set_container_of(priv);
- err = rhashtable_walk_init(&priv->ht, &hti);
+ err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
if (err)
goto schedule;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 49d14ecad444..b10ade272b50 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -120,9 +120,9 @@ xt_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff,
{
switch (protocol) {
case IPPROTO_TCP:
- return __inet_lookup(net, &tcp_hashinfo, skb, doff,
- saddr, sport, daddr, dport,
- in->ifindex);
+ return inet_lookup(net, &tcp_hashinfo, skb, doff,
+ saddr, sport, daddr, dport,
+ in->ifindex);
case IPPROTO_UDP:
return udp4_lib_lookup(net, saddr, sport, daddr, dport,
in->ifindex);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 215fc08c02ab..0f16bf635480 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2343,7 +2343,8 @@ static int netlink_walk_start(struct nl_seq_iter *iter)
{
int err;
- err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
+ err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti,
+ GFP_KERNEL);
if (err) {
iter->link = MAX_LINKS;
return err;
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 234a73344c6e..ce947292ae77 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -7,7 +7,9 @@ config OPENVSWITCH
depends on INET
depends on !NF_CONNTRACK || \
(NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \
- (!NF_NAT || NF_NAT)))
+ (!NF_NAT || NF_NAT) && \
+ (!NF_NAT_IPV4 || NF_NAT_IPV4) && \
+ (!NF_NAT_IPV6 || NF_NAT_IPV6)))
select LIBCRC32C
select MPLS
select NET_MPLS_GSO
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index dc5eb29fe7d6..1b9d286756be 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -535,14 +535,15 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
- if (skb->protocol == htons(ETH_P_IP) &&
+ if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+ skb->protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
hooknum))
err = NF_DROP;
goto push;
-#if IS_ENABLED(CONFIG_NF_NAT_IPV6)
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
__be16 frag_off;
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
int hdrlen = ipv6_skip_exthdr(skb,
@@ -557,7 +558,6 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
err = NF_DROP;
goto push;
}
-#endif
}
/* Non-ICMP, fall thru to initialize if needed. */
case IP_CT_NEW:
@@ -664,11 +664,12 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
/* Determine NAT type.
* Check if the NAT type can be deduced from the tracked connection.
- * Make sure expected traffic is NATted only when committing.
+ * Make sure new expected connections (IP_CT_RELATED) are NATted only
+ * when committing.
*/
if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
ct->status & IPS_NAT_MASK &&
- (!(ct->status & IPS_EXPECTED_BIT) || info->commit)) {
+ (ctinfo != IP_CT_RELATED || info->commit)) {
/* NAT an established or related connection like before. */
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
/* This is the REPLY direction for a connection
@@ -968,7 +969,8 @@ static int parse_nat(const struct nlattr *attr,
break;
case OVS_NAT_ATTR_IP_MIN:
- nla_memcpy(&info->range.min_addr, a, nla_len(a));
+ nla_memcpy(&info->range.min_addr, a,
+ sizeof(info->range.min_addr));
info->range.flags |= NF_NAT_RANGE_MAP_IPS;
break;
@@ -1238,7 +1240,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
}
if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
- if (info->family == NFPROTO_IPV4) {
+ if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+ info->family == NFPROTO_IPV4) {
if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
info->range.min_addr.ip) ||
(info->range.max_addr.ip
@@ -1246,8 +1249,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
(nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
info->range.max_addr.ip))))
return false;
-#if IS_ENABLED(CONFIG_NF_NAT_IPV6)
- } else if (info->family == NFPROTO_IPV6) {
+ } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+ info->family == NFPROTO_IPV6) {
if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
&info->range.min_addr.in6) ||
(memcmp(&info->range.max_addr.in6,
@@ -1256,7 +1259,6 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
(nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
&info->range.max_addr.in6))))
return false;
-#endif
} else {
return false;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1ecfa710ca98..81a4c0574d73 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1837,6 +1837,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
struct sk_buff *skb = NULL;
struct net_device *dev;
+ struct sockcm_cookie sockc;
__be16 proto = 0;
int err;
int extra_len = 0;
@@ -1925,12 +1926,21 @@ retry:
goto out_unlock;
}
+ sockc.tsflags = 0;
+ if (msg->msg_controllen) {
+ err = sock_cmsg_send(sk, msg, &sockc);
+ if (unlikely(err)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -2486,7 +2496,8 @@ static int packet_snd_vnet_gso(struct sk_buff *skb,
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
void *frame, struct net_device *dev, void *data, int tp_len,
- __be16 proto, unsigned char *addr, int hlen, int copylen)
+ __be16 proto, unsigned char *addr, int hlen, int copylen,
+ const struct sockcm_cookie *sockc)
{
union tpacket_uhdr ph;
int to_write, offset, len, nr_frags, len_max;
@@ -2500,7 +2511,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->dev = dev;
skb->priority = po->sk.sk_priority;
skb->mark = po->sk.sk_mark;
- sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
+ sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
skb_shinfo(skb)->destructor_arg = ph.raw;
skb_reserve(skb, hlen);
@@ -2624,6 +2635,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
struct sk_buff *skb;
struct net_device *dev;
struct virtio_net_hdr *vnet_hdr = NULL;
+ struct sockcm_cookie sockc;
__be16 proto;
int err, reserve = 0;
void *ph;
@@ -2655,6 +2667,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
}
+ sockc.tsflags = 0;
+ if (msg->msg_controllen) {
+ err = sock_cmsg_send(&po->sk, msg, &sockc);
+ if (unlikely(err))
+ goto out;
+ }
+
err = -ENXIO;
if (unlikely(dev == NULL))
goto out;
@@ -2712,7 +2731,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
goto out_status;
}
tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
- addr, hlen, copylen);
+ addr, hlen, copylen, &sockc);
if (likely(tp_len >= 0) &&
tp_len > dev->mtu + reserve &&
!po->has_vnet_hdr &&
@@ -2851,6 +2870,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;
+ sockc.tsflags = 0;
sockc.mark = sk->sk_mark;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
@@ -2908,7 +2928,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_free;
}
- sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
@@ -4151,7 +4171,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
- WARN(1, "Tx-ring is not supported.\n");
+ net_warn_ratelimited("Tx-ring is not supported.\n");
goto out;
}
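The sockcm_cookie/tsflags plumbing added above lets the packet-socket send paths honour per-call timestamping requests carried in the control message via sock_cmsg_send(). A hypothetical userspace helper showing what such a call could look like; the function name is made up, and acceptance of SO_TIMESTAMPING as a sendmsg() cmsg depends on the core sock_cmsg_send() handling these hunks build on:

#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/net_tstamp.h>

/* send one datagram, asking for a software TX timestamp for this call only */
static ssize_t send_with_tx_tstamp(int fd, const void *buf, size_t len,
                                   const struct sockaddr *addr, socklen_t alen)
{
        uint32_t tsflags = SOF_TIMESTAMPING_TX_SOFTWARE;
        char cbuf[CMSG_SPACE(sizeof(tsflags))];
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
                .msg_name = (void *)addr, .msg_namelen = alen,
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        memset(cbuf, 0, sizeof(cbuf));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SO_TIMESTAMPING;
        cmsg->cmsg_len = CMSG_LEN(sizeof(tsflags));
        memcpy(CMSG_DATA(cmsg), &tsflags, sizeof(tsflags));

        return sendmsg(fd, &msg, 0);
}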
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 977fb86065b7..abc8cc805e8d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -796,7 +796,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
addr = kmap_atomic(sg_page(&frag->f_sg));
- src = addr + frag_off;
+ src = addr + frag->f_sg.offset + frag_off;
dst = (void *)map->m_page_addrs[map_page] + map_off;
for (k = 0; k < to_copy; k += 8) {
/* Record ports that became uncongested, ie
diff --git a/net/rds/page.c b/net/rds/page.c
index 616f21f4e7d7..e2b5a5832d3d 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -135,8 +135,8 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
if (rem->r_offset != 0)
rds_stats_inc(s_page_remainder_hit);
- rem->r_offset += bytes;
- if (rem->r_offset == PAGE_SIZE) {
+ rem->r_offset += ALIGN(bytes, 8);
+ if (rem->r_offset >= PAGE_SIZE) {
__free_page(rem->r_page);
rem->r_page = NULL;
}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 03f26e3a6f48..884027f62783 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1141,6 +1141,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
{
struct rfkill *rfkill;
struct rfkill_event ev;
+ int ret;
/* we don't need the 'hard' variable but accept it */
if (count < RFKILL_EVENT_SIZE_V1 - 1)
@@ -1155,29 +1156,36 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
if (copy_from_user(&ev, buf, count))
return -EFAULT;
- if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
- return -EINVAL;
-
if (ev.type >= NUM_RFKILL_TYPES)
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
- if (ev.op == RFKILL_OP_CHANGE_ALL)
+ switch (ev.op) {
+ case RFKILL_OP_CHANGE_ALL:
rfkill_update_global_state(ev.type, ev.soft);
-
- list_for_each_entry(rfkill, &rfkill_list, node) {
- if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
- continue;
-
- if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
- continue;
-
- rfkill_set_block(rfkill, ev.soft);
+ list_for_each_entry(rfkill, &rfkill_list, node)
+ if (rfkill->type == ev.type ||
+ ev.type == RFKILL_TYPE_ALL)
+ rfkill_set_block(rfkill, ev.soft);
+ ret = 0;
+ break;
+ case RFKILL_OP_CHANGE:
+ list_for_each_entry(rfkill, &rfkill_list, node)
+ if (rfkill->idx == ev.idx &&
+ (rfkill->type == ev.type ||
+ ev.type == RFKILL_TYPE_ALL))
+ rfkill_set_block(rfkill, ev.soft);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
+
mutex_unlock(&rfkill_global_mutex);
- return count;
+ return ret ?: count;
}
static int rfkill_fop_release(struct inode *inode, struct file *file)
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 23dcef12b986..784c53163b7b 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -30,7 +30,7 @@ config AF_RXRPC_DEBUG
config RXKAD
- tristate "RxRPC Kerberos security"
+ bool "RxRPC Kerberos security"
depends on AF_RXRPC
select CRYPTO
select CRYPTO_MANAGER
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index ec126f91276b..e05a06ef2254 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -18,11 +18,12 @@ af-rxrpc-y := \
ar-recvmsg.o \
ar-security.o \
ar-skbuff.o \
- ar-transport.o
+ ar-transport.o \
+ insecure.o \
+ misc.o
af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o
+af-rxrpc-$(CONFIG_RXKAD) += rxkad.o
af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
-
-obj-$(CONFIG_RXKAD) += rxkad.o
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 9d935fa5a2a9..e45e94ca030f 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -806,6 +806,12 @@ static int __init af_rxrpc_init(void)
goto error_work_queue;
}
+ ret = rxrpc_init_security();
+ if (ret < 0) {
+ printk(KERN_CRIT "RxRPC: Cannot initialise security\n");
+ goto error_security;
+ }
+
ret = proto_register(&rxrpc_proto, 1);
if (ret < 0) {
printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
@@ -853,6 +859,8 @@ error_sock:
proto_unregister(&rxrpc_proto);
error_proto:
destroy_workqueue(rxrpc_workqueue);
+error_security:
+ rxrpc_exit_security();
error_work_queue:
kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
@@ -883,6 +891,7 @@ static void __exit af_rxrpc_exit(void)
remove_proc_entry("rxrpc_conns", init_net.proc_net);
remove_proc_entry("rxrpc_calls", init_net.proc_net);
destroy_workqueue(rxrpc_workqueue);
+ rxrpc_exit_security();
kmem_cache_destroy(rxrpc_call_jar);
_leave("");
}
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 277731a5e67a..e7a7f05f13e2 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -108,7 +108,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
goto error;
}
- conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
+ conn = rxrpc_incoming_connection(trans, &sp->hdr);
rxrpc_put_transport(trans);
if (IS_ERR(conn)) {
_debug("no conn");
@@ -116,7 +116,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
goto error;
}
- call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
+ call = rxrpc_incoming_call(rx, conn, &sp->hdr);
rxrpc_put_connection(conn);
if (IS_ERR(call)) {
_debug("no call");
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index 16d967075eaf..374478e006e7 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -20,74 +20,6 @@
#include "ar-internal.h"
/*
- * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in jiffies).
- */
-unsigned int rxrpc_requested_ack_delay = 1;
-
-/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
- *
- * We use this when we've received new data packets. If those packets aren't
- * all consumed within this time we will send a DELAY ACK if an ACK was not
- * requested to let the sender know it doesn't need to resend.
- */
-unsigned int rxrpc_soft_ack_delay = 1 * HZ;
-
-/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
- *
- * We use this when we've consumed some previously soft-ACK'd packets when
- * further packets aren't immediately received to decide when to send an IDLE
- * ACK let the other end know that it can free up its Tx buffer space.
- */
-unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
-
-/*
- * Receive window size in packets. This indicates the maximum number of
- * unconsumed received packets we're willing to retain in memory. Once this
- * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
- * packets.
- */
-unsigned int rxrpc_rx_window_size = 32;
-
-/*
- * Maximum Rx MTU size. This indicates to the sender the size of jumbo packet
- * made by gluing normal packets together that we're willing to handle.
- */
-unsigned int rxrpc_rx_mtu = 5692;
-
-/*
- * The maximum number of fragments in a received jumbo packet that we tell the
- * sender that we're willing to handle.
- */
-unsigned int rxrpc_rx_jumbo_max = 4;
-
-static const char *rxrpc_acks(u8 reason)
-{
- static const char *const str[] = {
- "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
- "IDL", "-?-"
- };
-
- if (reason >= ARRAY_SIZE(str))
- reason = ARRAY_SIZE(str) - 1;
- return str[reason];
-}
-
-static const s8 rxrpc_ack_priority[] = {
- [0] = 0,
- [RXRPC_ACK_DELAY] = 1,
- [RXRPC_ACK_REQUESTED] = 2,
- [RXRPC_ACK_IDLE] = 3,
- [RXRPC_ACK_PING_RESPONSE] = 4,
- [RXRPC_ACK_DUPLICATE] = 5,
- [RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
- [RXRPC_ACK_EXCEEDS_WINDOW] = 7,
- [RXRPC_ACK_NOSPACE] = 8,
-};
-
-/*
* propose an ACK be sent
*/
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
@@ -426,7 +358,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
int tail = call->acks_tail, old_tail;
int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
- kenter("{%u,%u},%u", call->acks_hard, win, hard);
+ _enter("{%u,%u},%u", call->acks_hard, win, hard);
ASSERTCMP(hard - call->acks_hard, <=, win);
@@ -656,7 +588,8 @@ process_further:
_proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
/* secured packets must be verified and possibly decrypted */
- if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
+ if (call->conn->security->verify_packet(call, skb,
+ _abort_code) < 0)
goto protocol_error;
rxrpc_insert_oos_packet(call, skb);
@@ -901,8 +834,8 @@ void rxrpc_process_call(struct work_struct *work)
/* there's a good chance we're going to have to send a message, so set
* one up in advance */
- msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
- msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
+ msg.msg_name = &call->conn->trans->peer->srx.transport;
+ msg.msg_namelen = call->conn->trans->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -973,7 +906,7 @@ void rxrpc_process_call(struct work_struct *work)
ECONNABORTED, true) < 0)
goto no_mem;
whdr.type = RXRPC_PACKET_TYPE_ABORT;
- data = htonl(call->abort_code);
+ data = htonl(call->local_abort);
iov[1].iov_base = &data;
iov[1].iov_len = sizeof(data);
genbit = RXRPC_CALL_EV_ABORT;
@@ -1036,7 +969,7 @@ void rxrpc_process_call(struct work_struct *work)
write_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->abort_code = RX_CALL_TIMEOUT;
+ call->local_abort = RX_CALL_TIMEOUT;
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
}
write_unlock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index 7c8d300ade9b..571a41fd5a32 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -411,18 +411,17 @@ found_extant_second:
*/
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_connection *conn,
- struct rxrpc_host_header *hdr,
- gfp_t gfp)
+ struct rxrpc_host_header *hdr)
{
struct rxrpc_call *call, *candidate;
struct rb_node **p, *parent;
u32 call_id;
- _enter(",%d,,%x", conn->debug_id, gfp);
+ _enter(",%d", conn->debug_id);
ASSERT(rx != NULL);
- candidate = rxrpc_alloc_call(gfp);
+ candidate = rxrpc_alloc_call(GFP_NOIO);
if (!candidate)
return ERR_PTR(-EBUSY);
@@ -682,7 +681,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
_debug("+++ ABORTING STATE %d +++\n", call->state);
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->abort_code = RX_CALL_DEAD;
+ call->local_abort = RX_CALL_DEAD;
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
@@ -758,7 +757,7 @@ static void rxrpc_mark_call_released(struct rxrpc_call *call)
if (call->state < RXRPC_CALL_COMPLETE) {
_debug("abort call %p", call);
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->abort_code = RX_CALL_DEAD;
+ call->local_abort = RX_CALL_DEAD;
if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
sched = true;
}
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 9942da1edbf6..97f4fae74bca 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -207,6 +207,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
INIT_LIST_HEAD(&conn->bundle_link);
conn->calls = RB_ROOT;
skb_queue_head_init(&conn->rx_queue);
+ conn->security = &rxrpc_no_security;
rwlock_init(&conn->lock);
spin_lock_init(&conn->state_lock);
atomic_set(&conn->usage, 1);
@@ -564,8 +565,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
candidate->debug_id, candidate->trans->debug_id);
rxrpc_assign_connection_id(candidate);
- if (candidate->security)
- candidate->security->prime_packet_security(candidate);
+ candidate->security->prime_packet_security(candidate);
/* leave the candidate lurking in zombie mode attached to the
* bundle until we're ready for it */
@@ -619,8 +619,7 @@ interrupted:
*/
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
- struct rxrpc_host_header *hdr,
- gfp_t gfp)
+ struct rxrpc_host_header *hdr)
{
struct rxrpc_connection *conn, *candidate = NULL;
struct rb_node *p, **pp;
@@ -659,7 +658,7 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans,
/* not yet present - create a candidate for a new record and then
* redo the search */
- candidate = rxrpc_alloc_connection(gfp);
+ candidate = rxrpc_alloc_connection(GFP_NOIO);
if (!candidate) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
@@ -831,7 +830,10 @@ static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
ASSERT(RB_EMPTY_ROOT(&conn->calls));
rxrpc_purge_queue(&conn->rx_queue);
- rxrpc_clear_conn_security(conn);
+ conn->security->clear(conn);
+ key_put(conn->key);
+ key_put(conn->server_key);
+
rxrpc_put_transport(conn->trans);
kfree(conn);
_leave("");
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index 1bdaaed8cdc4..5f9563968a5b 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -40,11 +40,13 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
write_lock(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = state;
- call->abort_code = abort_code;
- if (state == RXRPC_CALL_LOCALLY_ABORTED)
+ if (state == RXRPC_CALL_LOCALLY_ABORTED) {
+ call->local_abort = conn->local_abort;
set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
- else
+ } else {
+ call->remote_abort = conn->remote_abort;
set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
+ }
rxrpc_queue_call(call);
}
write_unlock(&call->state_lock);
@@ -84,8 +86,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
- msg.msg_name = &conn->trans->peer->srx.transport.sin;
- msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
+ msg.msg_name = &conn->trans->peer->srx.transport;
+ msg.msg_namelen = conn->trans->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -101,7 +103,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
whdr._rsvd = 0;
whdr.serviceId = htons(conn->service_id);
- word = htonl(abort_code);
+ word = htonl(conn->local_abort);
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
@@ -112,7 +114,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
serial = atomic_inc_return(&conn->serial);
whdr.serial = htonl(serial);
- _proto("Tx CONN ABORT %%%u { %d }", serial, abort_code);
+ _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
if (ret < 0) {
@@ -172,15 +174,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
- if (conn->security)
- return conn->security->respond_to_challenge(
- conn, skb, _abort_code);
- return -EPROTO;
+ return conn->security->respond_to_challenge(conn, skb,
+ _abort_code);
case RXRPC_PACKET_TYPE_RESPONSE:
- if (!conn->security)
- return -EPROTO;
-
ret = conn->security->verify_response(conn, skb, _abort_code);
if (ret < 0)
return ret;
@@ -236,8 +233,6 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
}
}
- ASSERT(conn->security != NULL);
-
if (conn->security->issue_challenge(conn) < 0) {
abort_code = RX_CALL_DEAD;
ret = -ENOMEM;
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 63ed75c40e29..01e038146b7c 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -25,12 +25,6 @@
#include <net/net_namespace.h>
#include "ar-internal.h"
-const char *rxrpc_pkts[] = {
- "?00",
- "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
- "?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
-};
-
/*
* queue a packet for recvmsg to pass to userspace
* - the caller must hold a lock on call->lock
@@ -199,7 +193,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
/* if the packet need security things doing to it, then it goes down
* the slow path */
- if (call->conn->security)
+ if (call->conn->security_ix)
goto enqueue_packet;
sp->call = call;
@@ -355,7 +349,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
write_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_REMOTELY_ABORTED;
- call->abort_code = abort_code;
+ call->remote_abort = abort_code;
set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
rxrpc_queue_call(call);
}
@@ -428,7 +422,7 @@ protocol_error:
protocol_error_locked:
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->abort_code = RX_PROTOCOL_ERROR;
+ call->local_abort = RX_PROTOCOL_ERROR;
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
@@ -500,7 +494,7 @@ protocol_error:
write_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->abort_code = RX_PROTOCOL_ERROR;
+ call->local_abort = RX_PROTOCOL_ERROR;
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
@@ -612,9 +606,9 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
struct rxrpc_wire_header whdr;
/* dig out the RxRPC connection details */
- if (skb_copy_bits(skb, sizeof(struct udphdr), &whdr, sizeof(whdr)) < 0)
+ if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
return -EBADMSG;
- if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(whdr)))
+ if (!pskb_pull(skb, sizeof(whdr)))
BUG();
memset(sp, 0, sizeof(*sp));
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index cd6cdbe87125..f0b807a163fa 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <net/sock.h>
#include <rxrpc/packet.h>
#if 0
@@ -124,11 +125,15 @@ enum rxrpc_command {
* RxRPC security module interface
*/
struct rxrpc_security {
- struct module *owner; /* providing module */
- struct list_head link; /* link in master list */
const char *name; /* name of this service */
u8 security_index; /* security type provided */
+ /* Initialise a security service */
+ int (*init)(void);
+
+ /* Clean up a security service */
+ void (*exit)(void);
+
/* initialise a connection's security */
int (*init_connection_security)(struct rxrpc_connection *);
@@ -268,7 +273,7 @@ struct rxrpc_connection {
struct rb_root calls; /* calls on this connection */
struct sk_buff_head rx_queue; /* received conn-level packets */
struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */
- struct rxrpc_security *security; /* applied security module */
+ const struct rxrpc_security *security; /* applied security module */
struct key *key; /* security for this connection (client) */
struct key *server_key; /* security for this service */
struct crypto_skcipher *cipher; /* encryption handle */
@@ -289,7 +294,9 @@ struct rxrpc_connection {
RXRPC_CONN_LOCALLY_ABORTED, /* - conn aborted locally */
RXRPC_CONN_NETWORK_ERROR, /* - conn terminated by network error */
} state;
- int error; /* error code for local abort */
+ u32 local_abort; /* local abort code */
+ u32 remote_abort; /* remote abort code */
+ int error; /* local error incurred */
int debug_id; /* debug ID for printks */
unsigned int call_counter; /* call ID counter */
atomic_t serial; /* packet serial number counter */
@@ -399,7 +406,9 @@ struct rxrpc_call {
rwlock_t state_lock; /* lock for state transition */
atomic_t usage;
atomic_t sequence; /* Tx data packet sequence counter */
- u32 abort_code; /* local/remote abort code */
+ u32 local_abort; /* local abort code */
+ u32 remote_abort; /* remote abort code */
+ int error; /* local error incurred */
enum rxrpc_call_state state : 8; /* current state of call */
int debug_id; /* debug ID for printks */
u8 channel; /* connection channel occupied by this call */
@@ -453,7 +462,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
{
write_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE) {
- call->abort_code = abort_code;
+ call->local_abort = abort_code;
call->state = RXRPC_CALL_LOCALLY_ABORTED;
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
}
@@ -478,13 +487,6 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* ar-ack.c
*/
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
-extern unsigned int rxrpc_rx_window_size;
-extern unsigned int rxrpc_rx_mtu;
-extern unsigned int rxrpc_rx_jumbo_max;
-
void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_process_call(struct work_struct *);
@@ -506,7 +508,7 @@ struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
unsigned long, int, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
struct rxrpc_connection *,
- struct rxrpc_host_header *, gfp_t);
+ struct rxrpc_host_header *);
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
@@ -531,8 +533,7 @@ void __exit rxrpc_destroy_all_connections(void);
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
struct rxrpc_host_header *);
extern struct rxrpc_connection *
-rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *,
- gfp_t);
+rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *);
/*
* ar-connevent.c
@@ -550,8 +551,6 @@ void rxrpc_UDP_error_handler(struct work_struct *);
/*
* ar-input.c
*/
-extern const char *rxrpc_pkts[];
-
void rxrpc_data_ready(struct sock *);
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
@@ -610,14 +609,10 @@ int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
/*
* ar-security.c
*/
-int rxrpc_register_security(struct rxrpc_security *);
-void rxrpc_unregister_security(struct rxrpc_security *);
+int __init rxrpc_init_security(void);
+void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
- void *);
-int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
-void rxrpc_clear_conn_security(struct rxrpc_connection *);
/*
* ar-skbuff.c
@@ -637,6 +632,33 @@ struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
struct rxrpc_peer *);
/*
+ * insecure.c
+ */
+extern const struct rxrpc_security rxrpc_no_security;
+
+/*
+ * misc.c
+ */
+extern unsigned int rxrpc_requested_ack_delay;
+extern unsigned int rxrpc_soft_ack_delay;
+extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned int rxrpc_rx_window_size;
+extern unsigned int rxrpc_rx_mtu;
+extern unsigned int rxrpc_rx_jumbo_max;
+
+extern const char *const rxrpc_pkts[];
+extern const s8 rxrpc_ack_priority[];
+
+extern const char *rxrpc_acks(u8 reason);
+
+/*
+ * rxkad.c
+ */
+#ifdef CONFIG_RXKAD
+extern const struct rxrpc_security rxkad;
+#endif
+
+/*
* sysctl.c
*/
#ifdef CONFIG_SYSCTL
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index d36fb6e1a29c..51cb10062a8d 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -110,7 +110,7 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->abort_code = abort_code;
+ call->local_abort = abort_code;
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
del_timer_sync(&call->resend_timer);
del_timer_sync(&call->ack_timer);
@@ -663,7 +663,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
size_t pad;
/* pad out if we're using security */
- if (conn->security) {
+ if (conn->security_ix) {
pad = conn->security_size + skb->mark;
pad = conn->size_align - pad;
pad &= conn->size_align - 1;
@@ -695,7 +695,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
if (more && seq & 1)
sp->hdr.flags |= RXRPC_REQUEST_ACK;
- ret = rxrpc_secure_packet(
+ ret = conn->security->secure_packet(
call, skb, skb->mark,
skb->head + sizeof(struct rxrpc_wire_header));
if (ret < 0)
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
index 525b2ba5a8f4..225163bc658d 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/ar-proc.c
@@ -80,7 +80,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call->conn->in_clientflag ? "Svc" : "Clt",
atomic_read(&call->usage),
rxrpc_call_states[call->state],
- call->abort_code,
+ call->remote_abort ?: call->local_abort,
call->user_call_ID);
return 0;
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 64facba24a45..160f0927aa3e 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -288,7 +288,11 @@ receive_non_data_message:
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
break;
case RXRPC_SKB_MARK_REMOTE_ABORT:
- abort_code = call->abort_code;
+ abort_code = call->remote_abort;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
+ break;
+ case RXRPC_SKB_MARK_LOCAL_ABORT:
+ abort_code = call->local_abort;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
break;
case RXRPC_SKB_MARK_NET_ERROR:
@@ -303,6 +307,7 @@ receive_non_data_message:
&abort_code);
break;
default:
+ pr_err("RxRPC: Unknown packet mark %u\n", skb->mark);
BUG();
break;
}
@@ -401,9 +406,14 @@ u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT);
-
- return sp->call->abort_code;
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_REMOTE_ABORT:
+ return sp->call->remote_abort;
+ case RXRPC_SKB_MARK_LOCAL_ABORT:
+ return sp->call->local_abort;
+ default:
+ BUG();
+ }
}
EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c
index ceff6394a65f..d223253b22fa 100644
--- a/net/rxrpc/ar-security.c
+++ b/net/rxrpc/ar-security.c
@@ -22,109 +22,60 @@
static LIST_HEAD(rxrpc_security_methods);
static DECLARE_RWSEM(rxrpc_security_sem);
-/*
- * get an RxRPC security module
- */
-static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec)
-{
- return try_module_get(sec->owner) ? sec : NULL;
-}
-
-/*
- * release an RxRPC security module
- */
-static void rxrpc_security_put(struct rxrpc_security *sec)
+static const struct rxrpc_security *rxrpc_security_types[] = {
+ [RXRPC_SECURITY_NONE] = &rxrpc_no_security,
+#ifdef CONFIG_RXKAD
+ [RXRPC_SECURITY_RXKAD] = &rxkad,
+#endif
+};
+
+int __init rxrpc_init_security(void)
{
- module_put(sec->owner);
-}
-
-/*
- * look up an rxrpc security module
- */
-static struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
-{
- struct rxrpc_security *sec = NULL;
-
- _enter("");
+ int i, ret;
- down_read(&rxrpc_security_sem);
-
- list_for_each_entry(sec, &rxrpc_security_methods, link) {
- if (sec->security_index == security_index) {
- if (unlikely(!rxrpc_security_get(sec)))
- break;
- goto out;
+ for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++) {
+ if (rxrpc_security_types[i]) {
+ ret = rxrpc_security_types[i]->init();
+ if (ret < 0)
+ goto failed;
}
}
- sec = NULL;
-out:
- up_read(&rxrpc_security_sem);
- _leave(" = %p [%s]", sec, sec ? sec->name : "");
- return sec;
+ return 0;
+
+failed:
+ for (i--; i >= 0; i--)
+ if (rxrpc_security_types[i])
+ rxrpc_security_types[i]->exit();
+ return ret;
}
-/**
- * rxrpc_register_security - register an RxRPC security handler
- * @sec: security module
- *
- * register an RxRPC security handler for use by RxRPC
- */
-int rxrpc_register_security(struct rxrpc_security *sec)
+void rxrpc_exit_security(void)
{
- struct rxrpc_security *psec;
- int ret;
+ int i;
- _enter("");
- down_write(&rxrpc_security_sem);
-
- ret = -EEXIST;
- list_for_each_entry(psec, &rxrpc_security_methods, link) {
- if (psec->security_index == sec->security_index)
- goto out;
- }
-
- list_add(&sec->link, &rxrpc_security_methods);
-
- printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n",
- sec->security_index, sec->name);
- ret = 0;
-
-out:
- up_write(&rxrpc_security_sem);
- _leave(" = %d", ret);
- return ret;
+ for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++)
+ if (rxrpc_security_types[i])
+ rxrpc_security_types[i]->exit();
}
-EXPORT_SYMBOL_GPL(rxrpc_register_security);
-
-/**
- * rxrpc_unregister_security - unregister an RxRPC security handler
- * @sec: security module
- *
- * unregister an RxRPC security handler
+/*
+ * look up an rxrpc security module
*/
-void rxrpc_unregister_security(struct rxrpc_security *sec)
+static const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
{
-
- _enter("");
- down_write(&rxrpc_security_sem);
- list_del_init(&sec->link);
- up_write(&rxrpc_security_sem);
-
- printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n",
- sec->security_index, sec->name);
+ if (security_index >= ARRAY_SIZE(rxrpc_security_types))
+ return NULL;
+ return rxrpc_security_types[security_index];
}
-EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
-
/*
* initialise the security on a client connection
*/
int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
{
+ const struct rxrpc_security *sec;
struct rxrpc_key_token *token;
- struct rxrpc_security *sec;
struct key *key = conn->key;
int ret;
@@ -148,8 +99,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
ret = conn->security->init_connection_security(conn);
if (ret < 0) {
- rxrpc_security_put(conn->security);
- conn->security = NULL;
+ conn->security = &rxrpc_no_security;
return ret;
}
@@ -162,7 +112,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
*/
int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
{
- struct rxrpc_security *sec;
+ const struct rxrpc_security *sec;
struct rxrpc_local *local = conn->trans->local;
struct rxrpc_sock *rx;
struct key *key;
@@ -188,14 +138,12 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
/* the service appears to have died */
read_unlock_bh(&local->services_lock);
- rxrpc_security_put(sec);
_leave(" = -ENOENT");
return -ENOENT;
found_service:
if (!rx->securities) {
read_unlock_bh(&local->services_lock);
- rxrpc_security_put(sec);
_leave(" = -ENOKEY");
return -ENOKEY;
}
@@ -205,7 +153,6 @@ found_service:
&key_type_rxrpc_s, kdesc);
if (IS_ERR(kref)) {
read_unlock_bh(&local->services_lock);
- rxrpc_security_put(sec);
_leave(" = %ld [search]", PTR_ERR(kref));
return PTR_ERR(kref);
}
@@ -219,46 +166,3 @@ found_service:
_leave(" = 0");
return 0;
}
-
-/*
- * secure a packet prior to transmission
- */
-int rxrpc_secure_packet(const struct rxrpc_call *call,
- struct sk_buff *skb,
- size_t data_size,
- void *sechdr)
-{
- if (call->conn->security)
- return call->conn->security->secure_packet(
- call, skb, data_size, sechdr);
- return 0;
-}
-
-/*
- * secure a packet prior to transmission
- */
-int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb,
- u32 *_abort_code)
-{
- if (call->conn->security)
- return call->conn->security->verify_packet(
- call, skb, _abort_code);
- return 0;
-}
-
-/*
- * clear connection security
- */
-void rxrpc_clear_conn_security(struct rxrpc_connection *conn)
-{
- _enter("{%d}", conn->debug_id);
-
- if (conn->security) {
- conn->security->clear(conn);
- rxrpc_security_put(conn->security);
- conn->security = NULL;
- }
-
- key_put(conn->key);
- key_put(conn->server_key);
-}
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
new file mode 100644
index 000000000000..e571403613c1
--- /dev/null
+++ b/net/rxrpc/insecure.c
@@ -0,0 +1,83 @@
+/* Null security operations.
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static int none_init_connection_security(struct rxrpc_connection *conn)
+{
+ return 0;
+}
+
+static void none_prime_packet_security(struct rxrpc_connection *conn)
+{
+}
+
+static int none_secure_packet(const struct rxrpc_call *call,
+ struct sk_buff *skb,
+ size_t data_size,
+ void *sechdr)
+{
+ return 0;
+}
+
+static int none_verify_packet(const struct rxrpc_call *call,
+ struct sk_buff *skb,
+ u32 *_abort_code)
+{
+ return 0;
+}
+
+static int none_respond_to_challenge(struct rxrpc_connection *conn,
+ struct sk_buff *skb,
+ u32 *_abort_code)
+{
+ *_abort_code = RX_PROTOCOL_ERROR;
+ return -EPROTO;
+}
+
+static int none_verify_response(struct rxrpc_connection *conn,
+ struct sk_buff *skb,
+ u32 *_abort_code)
+{
+ *_abort_code = RX_PROTOCOL_ERROR;
+ return -EPROTO;
+}
+
+static void none_clear(struct rxrpc_connection *conn)
+{
+}
+
+static int none_init(void)
+{
+ return 0;
+}
+
+static void none_exit(void)
+{
+}
+
+/*
+ * RxRPC null security - used when no security is required
+ */
+const struct rxrpc_security rxrpc_no_security = {
+ .name = "none",
+ .security_index = RXRPC_SECURITY_NONE,
+ .init = none_init,
+ .exit = none_exit,
+ .init_connection_security = none_init_connection_security,
+ .prime_packet_security = none_prime_packet_security,
+ .secure_packet = none_secure_packet,
+ .verify_packet = none_verify_packet,
+ .respond_to_challenge = none_respond_to_challenge,
+ .verify_response = none_verify_response,
+ .clear = none_clear,
+};
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
new file mode 100644
index 000000000000..1afe9876e79f
--- /dev/null
+++ b/net/rxrpc/misc.c
@@ -0,0 +1,89 @@
+/* Miscellaneous bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+/*
+ * How long to wait before scheduling ACK generation after seeing a
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
+ */
+unsigned int rxrpc_requested_ack_delay = 1;
+
+/*
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ *
+ * We use this when we've received new data packets. If those packets aren't
+ * all consumed within this time we will send a DELAY ACK if an ACK was not
+ * requested to let the sender know it doesn't need to resend.
+ */
+unsigned int rxrpc_soft_ack_delay = 1 * HZ;
+
+/*
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ *
+ * We use this when we've consumed some previously soft-ACK'd packets when
+ * further packets aren't immediately received to decide when to send an IDLE
+ * ACK let the other end know that it can free up its Tx buffer space.
+ */
+unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
+
+/*
+ * Receive window size in packets. This indicates the maximum number of
+ * unconsumed received packets we're willing to retain in memory. Once this
+ * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
+ * packets.
+ */
+unsigned int rxrpc_rx_window_size = 32;
+
+/*
+ * Maximum Rx MTU size. This indicates to the sender the size of jumbo packet
+ * made by gluing normal packets together that we're willing to handle.
+ */
+unsigned int rxrpc_rx_mtu = 5692;
+
+/*
+ * The maximum number of fragments in a received jumbo packet that we tell the
+ * sender that we're willing to handle.
+ */
+unsigned int rxrpc_rx_jumbo_max = 4;
+
+const char *const rxrpc_pkts[] = {
+ "?00",
+ "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
+ "?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
+};
+
+const s8 rxrpc_ack_priority[] = {
+ [0] = 0,
+ [RXRPC_ACK_DELAY] = 1,
+ [RXRPC_ACK_REQUESTED] = 2,
+ [RXRPC_ACK_IDLE] = 3,
+ [RXRPC_ACK_PING_RESPONSE] = 4,
+ [RXRPC_ACK_DUPLICATE] = 5,
+ [RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
+ [RXRPC_ACK_EXCEEDS_WINDOW] = 7,
+ [RXRPC_ACK_NOSPACE] = 8,
+};
+
+const char *rxrpc_acks(u8 reason)
+{
+ static const char *const str[] = {
+ "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
+ "IDL", "-?-"
+ };
+
+ if (reason >= ARRAY_SIZE(str))
+ reason = ARRAY_SIZE(str) - 1;
+ return str[reason];
+}
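As a quick sanity check on the units of the tunables now collected in misc.c (editor's worked example; the figures assume HZ=1000, which is configuration-dependent): rxrpc_requested_ack_delay = 1 jiffy ≈ 1 ms, rxrpc_soft_ack_delay = 1 * HZ = 1000 jiffies = 1 s, and rxrpc_idle_ack_delay = 0.5 * HZ is evaluated in floating point and truncated to 500 jiffies = 0.5 s. The values themselves are unchanged; only their home moves from ar-ack.c to misc.c.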
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f0aeb8163688..6b726a046a7d 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -20,7 +20,6 @@
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
-#define rxrpc_debug rxkad_debug
#include "ar-internal.h"
#define RXKAD_VERSION 2
@@ -31,10 +30,6 @@
#define REALM_SZ 40 /* size of principal's auth domain */
#define SNAME_SZ 40 /* size of service name */
-unsigned int rxrpc_debug;
-module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "rxkad debugging mask");
-
struct rxkad_level1_hdr {
__be32 data_size; /* true data size (excluding padding) */
};
@@ -44,10 +39,6 @@ struct rxkad_level2_hdr {
__be32 checksum; /* decrypted data checksum */
};
-MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos 4)");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
/*
* this holds a pinned cipher so that keventd doesn't get called by the cipher
* alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
@@ -1164,12 +1155,35 @@ static void rxkad_clear(struct rxrpc_connection *conn)
}
/*
+ * Initialise the rxkad security service.
+ */
+static int rxkad_init(void)
+{
+ /* pin the cipher we need so that the crypto layer doesn't invoke
+ * keventd to go get it */
+ rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(rxkad_ci))
+ return PTR_ERR(rxkad_ci);
+ return 0;
+}
+
+/*
+ * Clean up the rxkad security service.
+ */
+static void rxkad_exit(void)
+{
+ if (rxkad_ci)
+ crypto_free_skcipher(rxkad_ci);
+}
+
+/*
* RxRPC Kerberos-based security
*/
-static struct rxrpc_security rxkad = {
- .owner = THIS_MODULE,
+const struct rxrpc_security rxkad = {
.name = "rxkad",
.security_index = RXRPC_SECURITY_RXKAD,
+ .init = rxkad_init,
+ .exit = rxkad_exit,
.init_connection_security = rxkad_init_connection_security,
.prime_packet_security = rxkad_prime_packet_security,
.secure_packet = rxkad_secure_packet,
@@ -1179,28 +1193,3 @@ static struct rxrpc_security rxkad = {
.verify_response = rxkad_verify_response,
.clear = rxkad_clear,
};
-
-static __init int rxkad_init(void)
-{
- _enter("");
-
- /* pin the cipher we need so that the crypto layer doesn't invoke
- * keventd to go get it */
- rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(rxkad_ci))
- return PTR_ERR(rxkad_ci);
-
- return rxrpc_register_security(&rxkad);
-}
-
-module_init(rxkad_init);
-
-static __exit void rxkad_exit(void)
-{
- _enter("");
-
- rxrpc_unregister_security(&rxkad);
- crypto_free_skcipher(rxkad_ci);
-}
-
-module_exit(rxkad_exit);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 736c004abfbc..9844fe573029 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -401,7 +401,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
sk = chunk->skb->sk;
/* Allocate the new skb. */
- nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
+ nskb = alloc_skb(packet->size + MAX_HEADER, gfp);
if (!nskb)
goto nomem;
@@ -523,8 +523,8 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
*/
if (auth)
sctp_auth_calculate_hmac(asoc, nskb,
- (struct sctp_auth_chunk *)auth,
- GFP_ATOMIC);
+ (struct sctp_auth_chunk *)auth,
+ gfp);
/* 2) Calculate the Adler-32 checksum of the whole packet,
* including the SCTP common header and all the
@@ -705,7 +705,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
/* Check whether this chunk and all the rest of pending data will fit
* or delay in hopes of bundling a full sized packet.
*/
- if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+ if (chunk->skb->len + q->out_qlen >
+ transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
/* Enough data queued to fill a packet */
return SCTP_XMIT_OK;
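To see what the stricter bundling test above changes, here is an editor's worked example assuming a 1500-byte path MTU over plain IPv4 with no options: packet->overhead is the 20-byte IP header plus the 12-byte SCTP common header = 32, and sizeof(sctp_data_chunk_t) is the 4-byte chunk header plus the 12-byte DATA header = 16, so the condition becomes chunk->skb->len + q->out_qlen > 1500 - 32 - 16 - 4 = 1448. The old test used the full MTU minus overhead (1468) as the threshold, so the packet was declared full slightly too eagerly; the extra sizeof(sctp_data_chunk_t) + 4 of headroom means bundling is only abandoned once the queue could not accommodate even one more minimal DATA chunk.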
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 5cfac8d5d3b3..6d45d53321e6 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -319,7 +319,8 @@ static int sctp_transport_walk_start(struct seq_file *seq)
struct sctp_ht_iter *iter = seq->private;
int err;
- err = rhashtable_walk_init(&sctp_transport_hashtable, &iter->hti);
+ err = rhashtable_walk_init(&sctp_transport_hashtable, &iter->hti,
+ GFP_KERNEL);
if (err)
return err;
diff --git a/net/socket.c b/net/socket.c
index 5f77a8e93830..afa3c3470717 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -587,20 +587,20 @@ void sock_release(struct socket *sock)
}
EXPORT_SYMBOL(sock_release);
-void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
{
u8 flags = *tx_flags;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE)
+ if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE)
flags |= SKBTX_HW_TSTAMP;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SOFTWARE)
+ if (tsflags & SOF_TIMESTAMPING_TX_SOFTWARE)
flags |= SKBTX_SW_TSTAMP;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)
+ if (tsflags & SOF_TIMESTAMPING_TX_SCHED)
flags |= SKBTX_SCHED_TSTAMP;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)
+ if (tsflags & SOF_TIMESTAMPING_TX_ACK)
flags |= SKBTX_ACK_TSTAMP;
*tx_flags = flags;
@@ -1046,7 +1046,7 @@ static int sock_fasync(int fd, struct file *filp, int on)
return -EINVAL;
lock_sock(sk);
- wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+ wq = rcu_dereference_protected(sock->wq, lockdep_sock_is_held(sk));
fasync_helper(fd, filp, on, &wq->fasync_list);
if (!wq->fasync_list)
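A side note on the __sock_tx_timestamp() change earlier in this file: the helper now receives the timestamping flags as a parameter instead of reading sk->sk_tsflags itself, which opens the door to passing per-message flags. A hypothetical call site would look roughly like this (editor's sketch; the tsflags variable and where it comes from are assumptions, only the helper's new signature appears in the hunk above):

	__u16 tsflags = sk->sk_tsflags;	/* or a per-message value carried in a cmsg */

	__sock_tx_timestamp(tsflags, &skb_shinfo(skb)->tx_flags);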
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8c6bc795f060..15612ffa8d57 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1728,8 +1728,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
return 0;
}
- first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
- last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
+ first = snd_buf->page_base >> PAGE_SHIFT;
+ last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
rqstp->rq_enc_pages_num = last - first + 1 + 1;
rqstp->rq_enc_pages
= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
@@ -1775,10 +1775,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
status = alloc_enc_pages(rqstp);
if (status)
return status;
- first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+ first = snd_buf->page_base >> PAGE_SHIFT;
inpages = snd_buf->pages + first;
snd_buf->pages = rqstp->rq_enc_pages;
- snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
+ snd_buf->page_base -= first << PAGE_SHIFT;
/*
* Give the tail its own page, in case we need extra space in the
* head when wrapping:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d94a8e1e9f05..045e11ecd332 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -465,7 +465,7 @@ encryptor(struct scatterlist *sg, void *data)
page_pos = desc->pos - outbuf->head[0].iov_len;
if (page_pos >= 0 && page_pos < outbuf->page_len) {
/* pages are not in place: */
- int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+ int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
in_page = desc->pages[i];
} else {
in_page = sg_page(sg);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 765088e4ad84..a737c2da0837 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -79,9 +79,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
len -= buf->head[0].iov_len;
if (len <= buf->page_len) {
unsigned int last = (buf->page_base + len - 1)
- >>PAGE_CACHE_SHIFT;
+ >>PAGE_SHIFT;
unsigned int offset = (buf->page_base + len - 1)
- & (PAGE_CACHE_SIZE - 1);
+ & (PAGE_SIZE - 1);
ptr = kmap_atomic(buf->pages[last]);
pad = *(ptr + offset);
kunmap_atomic(ptr);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 008c25d1b9f9..553bf95f7003 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -881,7 +881,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
char *kaddr;
ssize_t ret = -ENOMEM;
- if (count >= PAGE_CACHE_SIZE)
+ if (count >= PAGE_SIZE)
goto out_slow;
page = find_or_create_page(mapping, 0, GFP_KERNEL);
@@ -892,7 +892,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
ret = cache_do_downcall(kaddr, buf, count, cd);
kunmap(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return ret;
out_slow:
return cache_slow_downcall(buf, count, cd);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 31789ef3e614..fc48eca21fd2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1390,8 +1390,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int err;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = RPCAUTH_GSSMAGIC;
sb->s_op = &s_ops;
sb->s_d_op = &simple_dentry_operations;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 2df87f78e518..f217c348b341 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -96,8 +96,8 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
if (base || xdr->page_base) {
pglen -= base;
base += xdr->page_base;
- ppage += base >> PAGE_CACHE_SHIFT;
- base &= ~PAGE_CACHE_MASK;
+ ppage += base >> PAGE_SHIFT;
+ base &= ~PAGE_MASK;
}
do {
char *kaddr;
@@ -113,7 +113,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
}
}
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
kaddr = kmap_atomic(*ppage);
if (base) {
len -= base;
@@ -155,7 +155,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
struct xdr_skb_reader desc;
desc.skb = skb;
- desc.offset = sizeof(struct udphdr);
+ desc.offset = 0;
desc.count = skb->len - desc.offset;
if (skb_csum_unnecessary(skb))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 1413cdcc131c..71d6072664d2 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -617,7 +617,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
svsk->sk_sk->sk_stamp = skb->tstamp;
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
- len = skb->len - sizeof(struct udphdr);
+ len = skb->len;
rqstp->rq_arg.len = len;
rqstp->rq_prot = IPPROTO_UDP;
@@ -641,8 +641,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
skb_free_datagram_locked(svsk->sk_sk, skb);
} else {
/* we can use it in-place */
- rqstp->rq_arg.head[0].iov_base = skb->data +
- sizeof(struct udphdr);
+ rqstp->rq_arg.head[0].iov_base = skb->data;
rqstp->rq_arg.head[0].iov_len = len;
if (skb_checksum_complete(skb))
goto out_free;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4c1b53..6bdb3865212d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
* Note: the addresses pgto_base and pgfrom_base are both calculated in
* the same way:
* if a memory area starts at byte 'base' in page 'pages[i]',
- * then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ * then its address is given as (i << PAGE_SHIFT) + base
* Also note: pgfrom_base must be < pgto_base, but the memory areas
* they point to may overlap.
*/
@@ -181,20 +181,20 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
pgto_base += len;
pgfrom_base += len;
- pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
- pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
+ pgto = pages + (pgto_base >> PAGE_SHIFT);
+ pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
- pgto_base &= ~PAGE_CACHE_MASK;
- pgfrom_base &= ~PAGE_CACHE_MASK;
+ pgto_base &= ~PAGE_MASK;
+ pgfrom_base &= ~PAGE_MASK;
do {
/* Are any pointers crossing a page boundary? */
if (pgto_base == 0) {
- pgto_base = PAGE_CACHE_SIZE;
+ pgto_base = PAGE_SIZE;
pgto--;
}
if (pgfrom_base == 0) {
- pgfrom_base = PAGE_CACHE_SIZE;
+ pgfrom_base = PAGE_SIZE;
pgfrom--;
}
@@ -236,11 +236,11 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
char *vto;
size_t copy;
- pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
- pgbase &= ~PAGE_CACHE_MASK;
+ pgto = pages + (pgbase >> PAGE_SHIFT);
+ pgbase &= ~PAGE_MASK;
for (;;) {
- copy = PAGE_CACHE_SIZE - pgbase;
+ copy = PAGE_SIZE - pgbase;
if (copy > len)
copy = len;
@@ -253,7 +253,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
break;
pgbase += copy;
- if (pgbase == PAGE_CACHE_SIZE) {
+ if (pgbase == PAGE_SIZE) {
flush_dcache_page(*pgto);
pgbase = 0;
pgto++;
@@ -280,11 +280,11 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
char *vfrom;
size_t copy;
- pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
- pgbase &= ~PAGE_CACHE_MASK;
+ pgfrom = pages + (pgbase >> PAGE_SHIFT);
+ pgbase &= ~PAGE_MASK;
do {
- copy = PAGE_CACHE_SIZE - pgbase;
+ copy = PAGE_SIZE - pgbase;
if (copy > len)
copy = len;
@@ -293,7 +293,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
kunmap_atomic(vfrom);
pgbase += copy;
- if (pgbase == PAGE_CACHE_SIZE) {
+ if (pgbase == PAGE_SIZE) {
pgbase = 0;
pgfrom++;
}
@@ -1038,8 +1038,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
if (base < buf->page_len) {
subbuf->page_len = min(buf->page_len - base, len);
base += buf->page_base;
- subbuf->page_base = base & ~PAGE_CACHE_MASK;
- subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
+ subbuf->page_base = base & ~PAGE_MASK;
+ subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
len -= subbuf->page_len;
base = 0;
} else {
@@ -1297,9 +1297,9 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
todo -= avail_here;
base += buf->page_base;
- ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
- base &= ~PAGE_CACHE_MASK;
- avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
+ ppages = buf->pages + (base >> PAGE_SHIFT);
+ base &= ~PAGE_MASK;
+ avail_page = min_t(unsigned int, PAGE_SIZE - base,
avail_here);
c = kmap(*ppages) + base;
@@ -1383,7 +1383,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
}
avail_page = min(avail_here,
- (unsigned int) PAGE_CACHE_SIZE);
+ (unsigned int) PAGE_SIZE);
}
base = buf->page_len; /* align to start of tail */
}
@@ -1479,9 +1479,9 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
if (page_len > len)
page_len = len;
len -= page_len;
- page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
- i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
- thislen = PAGE_CACHE_SIZE - page_offset;
+ page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
+ i = (offset + buf->page_base) >> PAGE_SHIFT;
+ thislen = PAGE_SIZE - page_offset;
do {
if (thislen > page_len)
thislen = page_len;
@@ -1492,7 +1492,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
page_len -= thislen;
i++;
page_offset = 0;
- thislen = PAGE_CACHE_SIZE;
+ thislen = PAGE_SIZE;
} while (page_len != 0);
offset = 0;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 65e759569e48..c1fc7b20bbc1 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -995,15 +995,14 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
u32 _xid;
__be32 *xp;
- repsize = skb->len - sizeof(struct udphdr);
+ repsize = skb->len;
if (repsize < 4) {
dprintk("RPC: impossible RPC reply size %d!\n", repsize);
return;
}
/* Copy the XID from the skb... */
- xp = skb_header_pointer(skb, sizeof(struct udphdr),
- sizeof(_xid), &_xid);
+ xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
if (xp == NULL)
return;
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 8b5833c1ff2e..2b9b98f1c2ff 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -1079,7 +1079,7 @@ nla_put_failure:
* @filter_dev: filter device
* @idx:
*
- * Delete FDB entry from switch device.
+ * Dump FDB entries from switch device.
*/
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 27a5406213c6..6f11c62bc8f9 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -205,6 +205,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
struct tipc_bearer *b;
struct tipc_media *m;
struct tipc_bearer_names b_names;
+ struct sk_buff *skb;
char addr_string[16];
u32 bearer_id;
u32 with_this_prio;
@@ -301,7 +302,7 @@ restart:
b->net_plane = bearer_id + 'A';
b->priority = priority;
- res = tipc_disc_create(net, b, &b->bcast_addr);
+ res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) {
bearer_disable(net, b);
pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
@@ -310,7 +311,8 @@ restart:
}
rcu_assign_pointer(tn->bearer_list[bearer_id], b);
-
+ if (skb)
+ tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
name,
tipc_addr_string_fill(addr_string, disc_domain), priority);
@@ -335,23 +337,16 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
*/
static void bearer_disable(struct net *net, struct tipc_bearer *b)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- u32 i;
+ struct tipc_net *tn = tipc_net(net);
+ int bearer_id = b->identity;
pr_info("Disabling bearer <%s>\n", b->name);
b->media->disable_media(b);
-
- tipc_node_delete_links(net, b->identity);
+ tipc_node_delete_links(net, bearer_id);
RCU_INIT_POINTER(b->media_ptr, NULL);
if (b->link_req)
tipc_disc_delete(b->link_req);
-
- for (i = 0; i < MAX_BEARERS; i++) {
- if (b == rtnl_dereference(tn->bearer_list[i])) {
- RCU_INIT_POINTER(tn->bearer_list[i], NULL);
- break;
- }
- }
+ RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
kfree_rcu(b, rcu);
}
@@ -394,7 +389,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
/**
* tipc_l2_send_msg - send a TIPC packet out over an L2 interface
- * @buf: the packet to be sent
+ * @skb: the packet to be sent
* @b: the bearer through which the packet is to be sent
* @dest: peer destination address
*/
@@ -403,17 +398,21 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
{
struct net_device *dev;
int delta;
+ void *tipc_ptr;
dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
if (!dev)
return 0;
+ /* Send RESET message even if bearer is detached from device */
+ tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+ if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
+ goto drop;
+
delta = dev->hard_header_len - skb_headroom(skb);
if ((delta > 0) &&
- pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(skb);
- return 0;
- }
+ pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
+ goto drop;
skb_reset_network_header(skb);
skb->dev = dev;
@@ -422,6 +421,9 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
dev->dev_addr, skb->len);
dev_queue_xmit(skb);
return 0;
+drop:
+ kfree_skb(skb);
+ return 0;
}
int tipc_bearer_mtu(struct net *net, u32 bearer_id)
@@ -450,6 +452,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
if (likely(b))
b->media->send_msg(net, skb, b, dest);
+ else
+ kfree_skb(skb);
rcu_read_unlock();
}
@@ -468,11 +472,11 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
rcu_read_lock();
b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
- if (likely(b)) {
- skb_queue_walk_safe(xmitq, skb, tmp) {
- __skb_dequeue(xmitq);
- b->media->send_msg(net, skb, b, dst);
- }
+ if (unlikely(!b))
+ __skb_queue_purge(xmitq);
+ skb_queue_walk_safe(xmitq, skb, tmp) {
+ __skb_dequeue(xmitq);
+ b->media->send_msg(net, skb, b, dst);
}
rcu_read_unlock();
}
@@ -490,14 +494,14 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
rcu_read_lock();
b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
- if (likely(b)) {
- skb_queue_walk_safe(xmitq, skb, tmp) {
- hdr = buf_msg(skb);
- msg_set_non_seq(hdr, 1);
- msg_set_mc_netid(hdr, net_id);
- __skb_dequeue(xmitq);
- b->media->send_msg(net, skb, b, &b->bcast_addr);
- }
+ if (unlikely(!b))
+ __skb_queue_purge(xmitq);
+ skb_queue_walk_safe(xmitq, skb, tmp) {
+ hdr = buf_msg(skb);
+ msg_set_non_seq(hdr, 1);
+ msg_set_mc_netid(hdr, net_id);
+ __skb_dequeue(xmitq);
+ b->media->send_msg(net, skb, b, &b->bcast_addr);
}
rcu_read_unlock();
}
@@ -513,24 +517,21 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
* ignores packets sent using interface multicast, and traffic sent to other
* nodes (which can happen if interface is running in promiscuous mode).
*/
-static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
+static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct tipc_bearer *b;
rcu_read_lock();
b = rcu_dereference_rtnl(dev->tipc_ptr);
- if (likely(b)) {
- if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
- buf->next = NULL;
- tipc_rcv(dev_net(dev), buf, b);
- rcu_read_unlock();
- return NET_RX_SUCCESS;
- }
+ if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) {
+ skb->next = NULL;
+ tipc_rcv(dev_net(dev), skb, b);
+ rcu_read_unlock();
+ return NET_RX_SUCCESS;
}
rcu_read_unlock();
-
- kfree_skb(buf);
+ kfree_skb(skb);
return NET_RX_DROP;
}
@@ -548,9 +549,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
+ struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b;
+ int i;
b = rtnl_dereference(dev->tipc_ptr);
+ if (!b) {
+ for (i = 0; i < MAX_BEARERS; b = NULL, i++) {
+ b = rtnl_dereference(tn->bearer_list[i]);
+ if (b && (b->media_ptr == dev))
+ break;
+ }
+ }
if (!b)
return NOTIFY_DONE;
@@ -560,13 +570,20 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
case NETDEV_CHANGE:
if (netif_carrier_ok(dev))
break;
+ case NETDEV_UP:
+ rcu_assign_pointer(dev->tipc_ptr, b);
+ break;
case NETDEV_GOING_DOWN:
+ RCU_INIT_POINTER(dev->tipc_ptr, NULL);
+ synchronize_net();
+ tipc_reset_bearer(net, b);
+ break;
case NETDEV_CHANGEMTU:
tipc_reset_bearer(net, b);
break;
case NETDEV_CHANGEADDR:
b->media->raw2addr(b, &b->addr,
- (char *)dev->dev_addr);
+ (char *)dev->dev_addr);
tipc_reset_bearer(net, b);
break;
case NETDEV_UNREGISTER:
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index f1e738e80535..ad9d477cc242 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -268,10 +268,9 @@ exit:
* Returns 0 if successful, otherwise -errno.
*/
int tipc_disc_create(struct net *net, struct tipc_bearer *b,
- struct tipc_media_addr *dest)
+ struct tipc_media_addr *dest, struct sk_buff **skb)
{
struct tipc_link_req *req;
- struct sk_buff *skb;
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
@@ -293,9 +292,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
setup_timer(&req->timer, disc_timeout, (unsigned long)req);
mod_timer(&req->timer, jiffies + req->timer_intv);
b->link_req = req;
- skb = skb_clone(req->buf, GFP_ATOMIC);
- if (skb)
- tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
+ *skb = skb_clone(req->buf, GFP_ATOMIC);
return 0;
}
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index c9b12770c5ed..b80a335389c0 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -40,7 +40,7 @@
struct tipc_link_req;
int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
- struct tipc_media_addr *dest);
+ struct tipc_media_addr *dest, struct sk_buff **skb);
void tipc_disc_delete(struct tipc_link_req *req);
void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
void tipc_disc_add_dest(struct tipc_link_req *req);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 55778a0aebf3..f34f639df643 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -779,6 +779,11 @@ static inline bool msg_peer_node_is_up(struct tipc_msg *m)
return msg_redundant_link(m);
}
+static inline bool msg_is_reset(struct tipc_msg *hdr)
+{
+ return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
+}
+
struct sk_buff *tipc_buf_acquire(u32 size);
bool tipc_msg_validate(struct sk_buff *skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 0a369bb440e7..662bdd20a748 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
* qp_handle.
*/
if (vmci_handle_is_invalid(e_payload->handle) ||
- vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+ !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
return;
/* We don't ask for delayed CBs when we subscribe to this event (we
@@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.2.0-k");
+MODULE_VERSION("1.0.3.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 9f1c4aa851ef..5327e4b974fa 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -626,6 +626,13 @@ int wiphy_register(struct wiphy *wiphy)
!rdev->ops->set_mac_acl)))
return -EINVAL;
+ /* assure only valid behaviours are flagged by driver
+ * hence subtract 2 as bit 0 is invalid.
+ */
+ if (WARN_ON(wiphy->bss_select_support &&
+ (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2))))
+ return -EINVAL;
+
if (wiphy->addresses)
memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
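To unpack the "subtract 2" in the comment above (editor's worked example; it assumes __NL80211_BSS_SELECT_ATTR_AFTER_LAST is 4, i.e. the three behaviours RSSI, BAND_PREF and RSSI_ADJUST defined by this series): BIT(4) - 2 = 0x10 - 0x2 = 0x0e, a mask with bits 1-3 set. Bit 0 corresponds to __NL80211_BSS_SELECT_ATTR_INVALID and is deliberately excluded, so a driver advertising bit 0, or any bit above RSSI_ADJUST, in wiphy->bss_select_support trips the WARN_ON and wiphy_register() fails with -EINVAL.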
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98c924260b3d..4f45a2913104 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -402,6 +402,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
[NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
[NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
+ [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
+ [NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 },
};
/* policy for the key attributes */
@@ -486,6 +488,15 @@ nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = {
[NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
};
+static const struct nla_policy
+nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
+ [NL80211_BSS_SELECT_ATTR_RSSI] = { .type = NLA_FLAG },
+ [NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 },
+ [NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = {
+ .len = sizeof(struct nl80211_bss_select_rssi_adjust)
+ },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
@@ -1731,6 +1742,25 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.ext_features))
goto nla_put_failure;
+ if (rdev->wiphy.bss_select_support) {
+ struct nlattr *nested;
+ u32 bss_select_support = rdev->wiphy.bss_select_support;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);
+ if (!nested)
+ goto nla_put_failure;
+
+ i = 0;
+ while (bss_select_support) {
+ if ((bss_select_support & 1) &&
+ nla_put_flag(msg, i))
+ goto nla_put_failure;
+ i++;
+ bss_select_support >>= 1;
+ }
+ nla_nest_end(msg, nested);
+ }
+
/* done */
state->split_start = 0;
break;
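
Note: the nl80211_send_wiphy() addition walks the bss_select_support bitmap and emits one flag attribute per set bit, using the bit position as the attribute number (nla_put_flag(msg, i)). The same bit-walk, extracted into a runnable sketch (function and variable names here are illustrative):

    #include <stdio.h>

    static void emit_supported_behaviours(unsigned int support)
    {
        int i = 0;

        while (support) {
            if (support & 1)
                printf("flag attr %d\n", i);   /* nla_put_flag(msg, i) in the kernel */
            i++;
            support >>= 1;
        }
    }

    int main(void)
    {
        emit_supported_behaviours(0x6);   /* bits 1 and 2 -> RSSI and BAND_PREF */
        return 0;
    }
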
@@ -3977,6 +4007,10 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
statype != CFG80211_STA_AP_CLIENT_UNASSOC)
return -EINVAL;
+ if (params->support_p2p_ps != -1 &&
+ statype != CFG80211_STA_AP_CLIENT_UNASSOC)
+ return -EINVAL;
+
if (params->aid &&
!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
statype != CFG80211_STA_AP_CLIENT_UNASSOC)
@@ -4270,6 +4304,18 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
else
params.listen_interval = -1;
+ if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
+ u8 tmp;
+
+ tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
+ if (tmp >= NUM_NL80211_P2P_PS_STATUS)
+ return -EINVAL;
+
+ params.support_p2p_ps = tmp;
+ } else {
+ params.support_p2p_ps = -1;
+ }
+
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -4393,6 +4439,23 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
+ if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
+ u8 tmp;
+
+ tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
+ if (tmp >= NUM_NL80211_P2P_PS_STATUS)
+ return -EINVAL;
+
+ params.support_p2p_ps = tmp;
+ } else {
+ /*
+ * If not specified, assume P2P PS is supported on a P2P GO interface
+ * and NOT supported on an AP interface.
+ */
+ params.support_p2p_ps =
+ dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO;
+ }
+
if (info->attrs[NL80211_ATTR_PEER_AID])
params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
else
@@ -5758,6 +5821,73 @@ static int validate_scan_freqs(struct nlattr *freqs)
return n_channels;
}
+static bool is_band_valid(struct wiphy *wiphy, enum ieee80211_band b)
+{
+ return b < IEEE80211_NUM_BANDS && wiphy->bands[b];
+}
+
+static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy,
+ struct cfg80211_bss_selection *bss_select)
+{
+ struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1];
+ struct nlattr *nest;
+ int err;
+ bool found = false;
+ int i;
+
+ /* only process one nested attribute */
+ nest = nla_data(nla);
+ if (!nla_ok(nest, nla_len(nest)))
+ return -EINVAL;
+
+ err = nla_parse(attr, NL80211_BSS_SELECT_ATTR_MAX, nla_data(nest),
+ nla_len(nest), nl80211_bss_select_policy);
+ if (err)
+ return err;
+
+ /* only one attribute may be given */
+ for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) {
+ if (attr[i]) {
+ if (found)
+ return -EINVAL;
+ found = true;
+ }
+ }
+
+ bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID;
+
+ if (attr[NL80211_BSS_SELECT_ATTR_RSSI])
+ bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI;
+
+ if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) {
+ bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF;
+ bss_select->param.band_pref =
+ nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]);
+ if (!is_band_valid(wiphy, bss_select->param.band_pref))
+ return -EINVAL;
+ }
+
+ if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) {
+ struct nl80211_bss_select_rssi_adjust *adj_param;
+
+ adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]);
+ bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST;
+ bss_select->param.adjust.band = adj_param->band;
+ bss_select->param.adjust.delta = adj_param->delta;
+ if (!is_band_valid(wiphy, bss_select->param.adjust.band))
+ return -EINVAL;
+ }
+
+ /* user-space did not provide behaviour attribute */
+ if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID)
+ return -EINVAL;
+
+ if (!(wiphy->bss_select_support & BIT(bss_select->behaviour)))
+ return -EINVAL;
+
+ return 0;
+}
+
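
Note: parse_bss_select() accepts exactly one behaviour attribute inside NL80211_ATTR_BSS_SELECT, fills cfg80211_bss_selection accordingly, and rejects anything the wiphy did not advertise in bss_select_support. The attribute-counting rule, reduced to a runnable sketch (the array stands in for the parsed attr[] table; names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* error unless exactly one slot is populated */
    static int count_check(const void *const *attr, size_t n)
    {
        bool found = false;
        size_t i;

        for (i = 0; i < n; i++) {
            if (!attr[i])
                continue;
            if (found)
                return -1;   /* stand-in for -EINVAL: more than one behaviour given */
            found = true;
        }
        return found ? 0 : -1;
    }

    int main(void)
    {
        const void *one[4] = { NULL, "rssi", NULL, NULL };

        printf("%d\n", count_check(one, 4));   /* 0: exactly one behaviour given */
        return 0;
    }
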
static int nl80211_parse_random_mac(struct nlattr **attrs,
u8 *mac_addr, u8 *mac_addr_mask)
{
@@ -5996,6 +6126,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->no_cck =
nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+ if (info->attrs[NL80211_ATTR_MAC])
+ memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]),
+ ETH_ALEN);
+ else
+ eth_broadcast_addr(request->bssid);
+
request->wdev = wdev;
request->wiphy = &rdev->wiphy;
request->scan_start = jiffies;
@@ -7922,6 +8058,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
connect.mfp = NL80211_MFP_NO;
}
+ if (info->attrs[NL80211_ATTR_PREV_BSSID])
+ connect.prev_bssid =
+ nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
+
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
connect.channel = nl80211_get_valid_chan(
wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -7995,8 +8135,24 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
}
+ if (info->attrs[NL80211_ATTR_BSS_SELECT]) {
+ /* bss selection makes no sense if bssid is set */
+ if (connect.bssid) {
+ kzfree(connkeys);
+ return -EINVAL;
+ }
+
+ err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT],
+ wiphy, &connect.bss_select);
+ if (err) {
+ kzfree(connkeys);
+ return err;
+ }
+ }
+
wdev_lock(dev->ieee80211_ptr);
- err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
+ err = cfg80211_connect(rdev, dev, &connect, connkeys,
+ connect.prev_bssid);
wdev_unlock(dev->ieee80211_ptr);
if (err)
kzfree(connkeys);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 14d5369eb778..50ea8e3fcbeb 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1293,6 +1293,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (wiphy->bands[i])
creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
+ eth_broadcast_addr(creq->bssid);
+
rdev->scan_req = creq;
err = rdev_scan(rdev, creq);
if (err) {
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 544558171787..1fba41676428 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -119,6 +119,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
wdev->conn->params.ssid_len);
request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
+ eth_broadcast_addr(request->bssid);
+
request->wdev = wdev;
request->wiphy = &rdev->wiphy;
request->scan_start = jiffies;
@@ -490,8 +492,18 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
if (!rdev->ops->auth || !rdev->ops->assoc)
return -EOPNOTSUPP;
- if (wdev->current_bss)
- return -EALREADY;
+ if (wdev->current_bss) {
+ if (!prev_bssid)
+ return -EALREADY;
+ if (prev_bssid &&
+ !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+ return -ENOTCONN;
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
+ wdev->current_bss = NULL;
+
+ cfg80211_sme_free(wdev);
+ }
if (WARN_ON(wdev->conn))
return -EINPROGRESS;
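
Note: cfg80211_sme_connect() now supports reassociation: if the device is already connected and the supplied prev_bssid matches the current BSS, the current BSS reference is released and the connect proceeds; a mismatched prev_bssid yields -ENOTCONN, and a missing one keeps the old -EALREADY behaviour. The decision path, condensed into a standalone sketch (illustrative, not kernel code):

    #include <errno.h>
    #include <stdbool.h>

    static int reassoc_check(bool connected, bool have_prev_bssid,
                             bool prev_matches_current)
    {
        if (!connected)
            return 0;            /* plain connect */
        if (!have_prev_bssid)
            return -EALREADY;    /* already connected, no reassociation asked for */
        if (!prev_matches_current)
            return -ENOTCONN;    /* stale prev_bssid from userspace */
        return 0;                /* release current BSS, then reassociate */
    }

    int main(void)
    {
        return reassoc_check(true, true, true);   /* 0: reassociation allowed */
    }
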
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 09b242b09bed..8da1fae23cfb 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1259,6 +1259,7 @@ TRACE_EVENT(rdev_connect,
__field(bool, privacy)
__field(u32, wpa_versions)
__field(u32, flags)
+ MAC_ENTRY(prev_bssid)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -1270,13 +1271,14 @@ TRACE_EVENT(rdev_connect,
__entry->privacy = sme->privacy;
__entry->wpa_versions = sme->crypto.wpa_versions;
__entry->flags = sme->flags;
+ MAC_ASSIGN(prev_bssid, sme->prev_bssid);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
- "flags: %u",
+ "flags: %u, previous bssid: " MAC_PR_FMT,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
__entry->auth_type, BOOL_TO_STR(__entry->privacy),
- __entry->wpa_versions, __entry->flags)
+ __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
);
TRACE_EVENT(rdev_set_cqm_rssi_config,
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index b50ee5d622e1..6250b1cfcde5 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -399,7 +399,10 @@ static int __init wireless_nlevent_init(void)
if (err)
return err;
- return register_netdevice_notifier(&wext_netdev_notifier);
+ err = register_netdevice_notifier(&wext_netdev_notifier);
+ if (err)
+ unregister_pernet_subsys(&wext_pernet_ops);
+ return err;
}
subsys_initcall(wireless_nlevent_init);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index ad7f5b3f9b61..1c4ad477ce93 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
skb_dst_force(skb);
+ dev_hold(skb->dev);
nexthdr = x->type->input(x, skb);
if (nexthdr == -EINPROGRESS)
return 0;
resume:
+ dev_put(skb->dev);
+
spin_lock(&x->lock);
if (nexthdr <= 0) {
if (nexthdr == -EBADMSG) {