author     Jakub Kicinski <kuba@kernel.org>  2022-09-01 12:58:02 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2022-09-01 12:58:02 -0700
commit     60ad1100d525699bce83690757ff3077c6ab83ab (patch)
tree       fd25eb112f4d52f4f5209bb0c83b6c36d25793ae /net
parent     net: sched: gred: remove NULL check before free table->tab in gred_destroy() (diff)
parent     Merge tag 'net-6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
download   linux-dev-60ad1100d525699bce83690757ff3077c6ab83ab.tar.xz
           linux-dev-60ad1100d525699bce83690757ff3077c6ab83ab.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
tools/testing/selftests/net/.gitignore
  sort the net-next version and use it

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_event.c      13
-rw-r--r--  net/bluetooth/hci_sync.c       30
-rw-r--r--  net/bluetooth/hidp/core.c       6
-rw-r--r--  net/bluetooth/iso.c            35
-rw-r--r--  net/bluetooth/l2cap_core.c     10
-rw-r--r--  net/bluetooth/mgmt.c           72
-rw-r--r--  net/core/skmsg.c                4
-rw-r--r--  net/dsa/tag_hellcreek.c         2
-rw-r--r--  net/ipv4/fib_frontend.c         4
-rw-r--r--  net/ipv4/ip_gre.c               2
-rw-r--r--  net/ipv4/ip_tunnel.c            7
-rw-r--r--  net/ipv4/tcp_input.c           21
-rw-r--r--  net/ipv4/tcp_ipv4.c             6
-rw-r--r--  net/kcm/kcmsock.c              15
-rw-r--r--  net/mac80211/ibss.c             4
-rw-r--r--  net/mac80211/scan.c            11
-rw-r--r--  net/mac80211/sta_info.c        10
-rw-r--r--  net/mac80211/tx.c               3
-rw-r--r--  net/mac802154/rx.c              2
-rw-r--r--  net/mpls/af_mpls.c              4
-rw-r--r--  net/openvswitch/datapath.c      4
-rw-r--r--  net/sched/sch_generic.c        31
-rw-r--r--  net/sched/sch_tbf.c             4
-rw-r--r--  net/smc/af_smc.c                1
-rw-r--r--  net/wireless/debugfs.c          3
-rw-r--r--  net/xdp/xsk_buff_pool.c        16
26 files changed, 192 insertions(+), 128 deletions(-)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 485c814cf44a..6643c9c20fa4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4179,6 +4179,17 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
}
}
+ if (i == ARRAY_SIZE(hci_cc_table)) {
+ /* Unknown opcode, assume byte 0 contains the status, so
+ * that e.g. __hci_cmd_sync() properly returns errors
+ * for vendor specific commands send by HCI drivers.
+ * If a vendor doesn't actually follow this convention we may
+ * need to introduce a vendor CC table in order to properly set
+ * the status.
+ */
+ *status = skb->data[0];
+ }
+
handle_cmd_cnt_and_timer(hdev, ev->ncmd);
hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
@@ -5790,7 +5801,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
- conn = hci_lookup_le_connect(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
if (!conn) {
/* In case of error status and there is no connection pending
* just unlock as there is nothing to cleanup.
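
Editor's note: the new hunk handles command-complete events whose opcode has no entry in hci_cc_table by treating byte 0 of the payload as the status, so vendor-specific commands still report errors. A rough userspace sketch of that "table dispatch with fallback" pattern follows; all names (cc_table, dispatch, the opcodes) are illustrative, not kernel APIs.

#include <stdio.h>
#include <stddef.h>

struct handler { unsigned short opcode; int (*func)(const unsigned char *data); };

static int cc_reset(const unsigned char *data) { (void)data; return 0; }

static const struct handler cc_table[] = {
	{ 0x0c03, cc_reset },	/* known opcode: dedicated handler */
};

/* Dispatch a command-complete payload; fall back to "byte 0 is the status"
 * for opcodes with no table entry (e.g. vendor commands). */
static int dispatch(unsigned short opcode, const unsigned char *data)
{
	size_t i;

	for (i = 0; i < sizeof(cc_table) / sizeof(cc_table[0]); i++)
		if (cc_table[i].opcode == opcode)
			return cc_table[i].func(data);

	return data[0];	/* unknown opcode: assume the first byte is the status */
}

int main(void)
{
	unsigned char vendor_evt[] = { 0x0c };	/* hypothetical vendor status byte */

	printf("known: %d, unknown: %d\n",
	       dispatch(0x0c03, vendor_evt), dispatch(0xfc01, vendor_evt));
	return 0;
}
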
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index e6d804b82b67..187786454d98 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -4773,9 +4773,11 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
/* Cleanup hci_conn object if it cannot be cancelled as it
* likelly means the controller and host stack are out of sync.
*/
- if (err)
+ if (err) {
+ hci_dev_lock(hdev);
hci_conn_failed(conn, err);
-
+ hci_dev_unlock(hdev);
+ }
return err;
case BT_CONNECT2:
return hci_reject_conn_sync(hdev, conn, reason);
@@ -5288,17 +5290,21 @@ int hci_suspend_sync(struct hci_dev *hdev)
/* Prevent disconnects from causing scanning to be re-enabled */
hci_pause_scan_sync(hdev);
- /* Soft disconnect everything (power off) */
- err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
- if (err) {
- /* Set state to BT_RUNNING so resume doesn't notify */
- hdev->suspend_state = BT_RUNNING;
- hci_resume_sync(hdev);
- return err;
- }
+ if (hci_conn_count(hdev)) {
+ /* Soft disconnect everything (power off) */
+ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+ if (err) {
+ /* Set state to BT_RUNNING so resume doesn't notify */
+ hdev->suspend_state = BT_RUNNING;
+ hci_resume_sync(hdev);
+ return err;
+ }
- /* Update event mask so only the allowed event can wakeup the host */
- hci_set_event_mask_sync(hdev);
+ /* Update event mask so only the allowed event can wakeup the
+ * host.
+ */
+ hci_set_event_mask_sync(hdev);
+ }
/* Only configure accept list if disconnect succeeded and wake
* isn't being prevented.
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5940744a8cd8..cc20e706c639 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -83,14 +83,14 @@ static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo
ci->product = session->input->id.product;
ci->version = session->input->id.version;
if (session->input->name)
- strlcpy(ci->name, session->input->name, 128);
+ strscpy(ci->name, session->input->name, 128);
else
- strlcpy(ci->name, "HID Boot Device", 128);
+ strscpy(ci->name, "HID Boot Device", 128);
} else if (session->hid) {
ci->vendor = session->hid->vendor;
ci->product = session->hid->product;
ci->version = session->hid->version;
- strlcpy(ci->name, session->hid->name, 128);
+ strscpy(ci->name, session->hid->name, 128);
}
}
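
Editor's note: the strlcpy() to strscpy() conversion matters because strlcpy() returns the length of the source string (forcing a read of possibly oversized input and hiding truncation unless the caller compares against the buffer size), while strscpy() returns the number of bytes copied or a negative error on truncation. A simplified userspace re-implementation to show the return-value difference; my_strscpy and E2BIG_ERR are local stand-ins, not the kernel helpers.

#include <stdio.h>
#include <string.h>

#define E2BIG_ERR (-7)	/* stand-in for -E2BIG */

/* Simplified strscpy(): copy at most size-1 bytes, always NUL-terminate,
 * return bytes copied or a negative error if the source was truncated. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (!size)
		return E2BIG_ERR;
	if (len == size) {		/* source does not fit */
		len = size - 1;
		memcpy(dst, src, len);
		dst[len] = '\0';
		return E2BIG_ERR;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];

	/* strlcpy() would return strlen(src) = 15 here, hiding the truncation. */
	printf("ret=%ld buf=\"%s\"\n",
	       my_strscpy(buf, "HID Boot Device", sizeof(buf)), buf);
	return 0;
}
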
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index ced8ad4fed4f..613039ba5dbf 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1309,7 +1309,7 @@ static int iso_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %p, sk %p, how %d", sock, sk, how);
if (!sk)
return 0;
@@ -1317,17 +1317,32 @@ static int iso_sock_shutdown(struct socket *sock, int how)
sock_hold(sk);
lock_sock(sk);
- if (!sk->sk_shutdown) {
- sk->sk_shutdown = SHUTDOWN_MASK;
- iso_sock_clear_timer(sk);
- __iso_sock_close(sk);
-
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
- !(current->flags & PF_EXITING))
- err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
+ switch (how) {
+ case SHUT_RD:
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto unlock;
+ sk->sk_shutdown |= RCV_SHUTDOWN;
+ break;
+ case SHUT_WR:
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto unlock;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ break;
+ case SHUT_RDWR:
+ if (sk->sk_shutdown & SHUTDOWN_MASK)
+ goto unlock;
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ break;
}
+ iso_sock_clear_timer(sk);
+ __iso_sock_close(sk);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+ !(current->flags & PF_EXITING))
+ err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+
+unlock:
release_sock(sk);
sock_put(sk);
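
Editor's note: iso_sock_shutdown() now honours the how argument instead of always performing a full close: SHUT_RD sets only the receive half, SHUT_WR only the send half, SHUT_RDWR both, and an already-shut-down direction returns early. A hedged userspace sketch of the flag mapping; RCV_DOWN/SEND_DOWN are local example bits, not the kernel's RCV_SHUTDOWN/SEND_SHUTDOWN values.

#include <stdio.h>
#include <sys/socket.h>	/* SHUT_RD, SHUT_WR, SHUT_RDWR */

#define RCV_DOWN  0x1	/* illustrative counterpart of RCV_SHUTDOWN */
#define SEND_DOWN 0x2	/* illustrative counterpart of SEND_SHUTDOWN */
#define BOTH_DOWN (RCV_DOWN | SEND_DOWN)

/* Return the new shutdown mask, or -1 if the requested direction was
 * already shut down (the kernel code jumps to its "unlock" label instead). */
static int apply_shutdown(int mask, int how)
{
	switch (how) {
	case SHUT_RD:
		if (mask & RCV_DOWN)
			return -1;
		return mask | RCV_DOWN;
	case SHUT_WR:
		if (mask & SEND_DOWN)
			return -1;
		return mask | SEND_DOWN;
	case SHUT_RDWR:
		if (mask & BOTH_DOWN)
			return -1;
		return mask | BOTH_DOWN;
	}
	return mask;
}

int main(void)
{
	int mask = apply_shutdown(0, SHUT_RD);

	printf("after SHUT_RD: %d, repeat: %d, then SHUT_WR: %d\n",
	       mask, apply_shutdown(mask, SHUT_RD), apply_shutdown(mask, SHUT_WR));
	return 0;
}
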
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cbe0cae73434..2c9de67daadc 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1992,11 +1992,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
src_match = !bacmp(&c->src, src);
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
- c = l2cap_chan_hold_unless_zero(c);
- if (c) {
- read_unlock(&chan_list_lock);
- return c;
- }
+ if (!l2cap_chan_hold_unless_zero(c))
+ continue;
+
+ read_unlock(&chan_list_lock);
+ return c;
}
/* Closest match */
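
Editor's note: l2cap_chan_hold_unless_zero() takes a reference only if the channel's refcount has not already dropped to zero; the rewrite simply skips dying channels with continue. The underlying "increment unless zero" idiom can be sketched with a C11 compare-and-swap loop; this is a generic illustration, not the kernel's kref/refcount implementation.

#include <stdatomic.h>
#include <stdio.h>

/* Take a reference only if the object is not already being freed
 * (refcount == 0). Returns 1 on success, 0 otherwise. */
static int ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return 1;
		/* old was reloaded by the failed CAS; recheck and retry */
	}
	return 0;
}

int main(void)
{
	atomic_int live = 2, dying = 0;
	int got = ref_get_unless_zero(&live);

	printf("live: %d (refcount now %d), dying: %d\n",
	       got, atomic_load(&live), ref_get_unless_zero(&dying));
	return 0;
}
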
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 6e31023b84f5..72e6595a71cc 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -4547,6 +4547,22 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_NOT_SUPPORTED);
}
+static u32 get_params_flags(struct hci_dev *hdev,
+ struct hci_conn_params *params)
+{
+ u32 flags = hdev->conn_flags;
+
+ /* Devices using RPAs can only be programmed in the acceptlist if
+ * LL Privacy has been enable otherwise they cannot mark
+ * HCI_CONN_FLAG_REMOTE_WAKEUP.
+ */
+ if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
+ flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
+
+ return flags;
+}
+
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -4578,10 +4594,10 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
} else {
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));
-
if (!params)
goto done;
+ supported_flags = get_params_flags(hdev, params);
current_flags = params->flags;
}
@@ -4649,38 +4665,35 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
&cp->addr.bdaddr, cp->addr.type);
}
- } else {
- params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
- le_addr_type(cp->addr.type));
- if (params) {
- /* Devices using RPAs can only be programmed in the
- * acceptlist LL Privacy has been enable otherwise they
- * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
- */
- if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
- !use_ll_privacy(hdev) &&
- hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type)) {
- bt_dev_warn(hdev,
- "Cannot set wakeable for RPA");
- goto unlock;
- }
- params->flags = current_flags;
- status = MGMT_STATUS_SUCCESS;
+ goto unlock;
+ }
- /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
- * has been set.
- */
- if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
- hci_update_passive_scan(hdev);
- } else {
- bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
- &cp->addr.bdaddr,
- le_addr_type(cp->addr.type));
- }
+ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+ le_addr_type(cp->addr.type));
+ if (!params) {
+ bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
+ &cp->addr.bdaddr, le_addr_type(cp->addr.type));
+ goto unlock;
}
+ supported_flags = get_params_flags(hdev, params);
+
+ if ((supported_flags | current_flags) != supported_flags) {
+ bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
+ current_flags, supported_flags);
+ goto unlock;
+ }
+
+ params->flags = current_flags;
+ status = MGMT_STATUS_SUCCESS;
+
+ /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
+ * has been set.
+ */
+ if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
+ hci_update_passive_scan(hdev);
+
unlock:
hci_dev_unlock(hdev);
@@ -5054,7 +5067,6 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
else
status = MGMT_STATUS_FAILED;
- mgmt_pending_remove(cmd);
goto unlock;
}
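
Editor's note: get_params_flags() computes which connection flags are really supported for a given device (dropping HCI_CONN_FLAG_REMOTE_WAKEUP for RPA devices without LL Privacy), and set_device_flags() then rejects any request containing an unsupported bit with the (supported | current) != supported test. A minimal sketch of that subset check; the flag values below are made up for the example.

#include <stdio.h>

#define FLAG_REMOTE_WAKEUP   0x1	/* illustrative bits only */
#define FLAG_DEVICE_PRIVACY  0x2

/* Reject a request that sets any bit outside the supported mask:
 * OR-ing a true subset into the mask leaves the mask unchanged. */
static int flags_valid(unsigned int supported, unsigned int requested)
{
	return (supported | requested) == supported;
}

int main(void)
{
	unsigned int supported = FLAG_DEVICE_PRIVACY;	/* wakeup masked out */

	printf("privacy only: %d, with wakeup: %d\n",
	       flags_valid(supported, FLAG_DEVICE_PRIVACY),
	       flags_valid(supported, FLAG_DEVICE_PRIVACY | FLAG_REMOTE_WAKEUP));
	return 0;
}
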
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 59e75ffcc1f4..188f8558d27d 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -461,7 +461,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
if (copied == len)
break;
- } while (!sg_is_last(sge));
+ } while ((i != msg_rx->sg.end) && !sg_is_last(sge));
if (unlikely(peek)) {
msg_rx = sk_psock_next_msg(psock, msg_rx);
@@ -471,7 +471,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
}
msg_rx->sg.start = i;
- if (!sge->length && sg_is_last(sge)) {
+ if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
msg_rx = sk_psock_dequeue_msg(psock);
kfree_sk_msg(msg_rx);
}
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index eb204ad36eee..846588c0070a 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev) {
- netdev_warn(dev, "Failed to get source port: %d\n", port);
+ netdev_warn_once(dev, "Failed to get source port: %d\n", port);
return NULL;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f361d3d56be2..943edf4ad4db 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -389,7 +389,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
dev_match = dev_match || (res.type == RTN_LOCAL &&
dev == net->loopback_dev);
if (dev_match) {
- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
return ret;
}
if (no_addr)
@@ -401,7 +401,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
ret = 0;
if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
if (res.type == RTN_UNICAST)
- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
}
return ret;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5c58e21f724e..f866d6282b2b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -609,7 +609,7 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id),
key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
- skb->mark, skb_get_hash(skb));
+ skb->mark, skb_get_hash(skb), key->flow_flags);
rt = ip_route_output_key(dev_net(dev), &fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e65e948cab9f..019f3b0839c5 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -295,7 +295,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
RT_TOS(iph->tos), dev_net(dev),
- tunnel->parms.link, tunnel->fwmark, 0);
+ tunnel->parms.link, tunnel->fwmark, 0, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) {
@@ -570,7 +570,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
- dev_net(dev), 0, skb->mark, skb_get_hash(skb));
+ dev_net(dev), 0, skb->mark, skb_get_hash(skb),
+ key->flow_flags);
if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
goto tx_error;
@@ -729,7 +730,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos),
dev_net(dev), tunnel->parms.link,
- tunnel->fwmark, skb_get_hash(skb));
+ tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
goto tx_error;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ab5f0ea166f1..b85a9f755da4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3614,12 +3614,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
/* RFC 5961 7 [ACK Throttling] */
static void tcp_send_challenge_ack(struct sock *sk)
{
- /* unprotected vars, we dont care of overwrites */
- static u32 challenge_timestamp;
- static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
- u32 count, now;
+ u32 count, now, ack_limit;
/* First check our per-socket dupack rate limit. */
if (__tcp_oow_rate_limited(net,
@@ -3627,18 +3624,22 @@ static void tcp_send_challenge_ack(struct sock *sk)
&tp->last_oow_ack_time))
return;
+ ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+ if (ack_limit == INT_MAX)
+ goto send_ack;
+
/* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
- if (now != challenge_timestamp) {
- u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+ if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
u32 half = (ack_limit + 1) >> 1;
- challenge_timestamp = now;
- WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
+ WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
+ WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit));
}
- count = READ_ONCE(challenge_count);
+ count = READ_ONCE(net->ipv4.tcp_challenge_count);
if (count > 0) {
- WRITE_ONCE(challenge_count, count - 1);
+ WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
+send_ack:
NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
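
Editor's note: the challenge-ACK limiter moves from global static variables to per-netns fields, and a sysctl value of INT_MAX now means "no limit" and skips the accounting entirely. Roughly, the counter is refilled once per second to a randomised value around the limit and an ACK is sent only while it stays positive. A single-threaded userspace sketch of that bucket follows, with no READ_ONCE/WRITE_ONCE or jiffies, just the arithmetic; names are local to the example.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static long bucket_stamp;	/* second the bucket was last refilled */
static long bucket_count;	/* challenge ACKs still allowed this second */

/* Return 1 if a challenge ACK may be sent now, under "limit" per second.
 * INT_MAX disables the limit, matching the new sysctl default. */
static int challenge_ack_allowed(int limit)
{
	long now = time(NULL);

	if (limit == INT_MAX)
		return 1;

	if (now != bucket_stamp) {
		long half = (limit + 1) / 2;

		bucket_stamp = now;
		/* refill somewhere between half and ~1.5x the limit */
		bucket_count = half + rand() % limit;
	}
	if (bucket_count > 0) {
		bucket_count--;
		return 1;
	}
	return 0;
}

int main(void)
{
	int i, sent = 0;

	srand((unsigned)time(NULL));
	for (i = 0; i < 10000; i++)
		sent += challenge_ack_allowed(1000);
	printf("sent %d of 10000 challenge ACKs\n", sent);
	return 0;
}
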
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cc2ad67f75be..01b31f5c7aba 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3158,8 +3158,10 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_tso_win_divisor = 3;
/* Default TSQ limit of 16 TSO segments */
net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
- /* rfc5961 challenge ack rate limiting */
- net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
+
+ /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
+ net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
+
net->ipv4.sysctl_tcp_min_tso_segs = 2;
net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */
net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 71899e5a5a11..1215c863e1c4 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1412,12 +1412,6 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
psock->sk = csk;
psock->bpf_prog = prog;
- err = strp_init(&psock->strp, csk, &cb);
- if (err) {
- kmem_cache_free(kcm_psockp, psock);
- goto out;
- }
-
write_lock_bh(&csk->sk_callback_lock);
/* Check if sk_user_data is already by KCM or someone else.
@@ -1425,13 +1419,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
*/
if (csk->sk_user_data) {
write_unlock_bh(&csk->sk_callback_lock);
- strp_stop(&psock->strp);
- strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
err = -EALREADY;
goto out;
}
+ err = strp_init(&psock->strp, csk, &cb);
+ if (err) {
+ write_unlock_bh(&csk->sk_callback_lock);
+ kmem_cache_free(kcm_psockp, psock);
+ goto out;
+ }
+
psock->save_data_ready = csk->sk_data_ready;
psock->save_write_space = csk->sk_write_space;
psock->save_state_change = csk->sk_state_change;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index c14d740b0122..9dffc3079588 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -530,6 +530,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
sdata_assert_lock(sdata);
+ /* When not connected/joined, sending CSA doesn't make sense. */
+ if (ifibss->state != IEEE80211_IBSS_MLME_JOINED)
+ return -ENOLINK;
+
/* update cfg80211 bss information with the new channel */
if (!is_zero_ether_addr(ifibss->bssid)) {
cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index fa8ddf576bc1..c4f2aeb31da3 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -469,16 +469,19 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
- if (scan_req != local->int_scan_req) {
- local->scan_info.aborted = aborted;
- cfg80211_scan_done(scan_req, &local->scan_info);
- }
RCU_INIT_POINTER(local->scan_req, NULL);
RCU_INIT_POINTER(local->scan_sdata, NULL);
local->scanning = 0;
local->scan_chandef.chan = NULL;
+ synchronize_rcu();
+
+ if (scan_req != local->int_scan_req) {
+ local->scan_info.aborted = aborted;
+ cfg80211_scan_done(scan_req, &local->scan_info);
+ }
+
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4944d929def6..fe8702d92892 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -495,7 +495,7 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sdata = sdata;
if (sta_info_alloc_link(local, &sta->deflink, gfp))
- return NULL;
+ goto free;
if (link_id >= 0) {
sta_info_add_link(sta, link_id, &sta->deflink,
@@ -2317,9 +2317,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
u64 value;
do {
- start = u64_stats_fetch_begin(&rxstats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
value = rxstats->msdu[tid];
- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
return value;
}
@@ -2385,9 +2385,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
u64 value;
do {
- start = u64_stats_fetch_begin(&rxstats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
value = rxstats->bytes;
- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
return value;
}
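
Editor's note: the u64_stats changes (here and in af_mpls.c below) switch readers to the _irq fetch variants, which on 32-bit kernels also disable interrupts around the read so the seqcount-style retry loop is safe against stats writers running from interrupt context. The retry pattern itself is a seqcount: the writer bumps a sequence before and after the update, and the reader retries whenever it saw an odd or changed sequence. A toy single-writer sketch, not the kernel's u64_stats API:

#include <stdatomic.h>
#include <stdio.h>

/* Toy seqcount-protected 64-bit counter. Readers retry while the sequence
 * is odd (update in flight) or has changed. Simplified: the kernel helpers
 * add the required barriers and, in the _irq variants, IRQ disabling. */
static atomic_uint seq;
static unsigned long long packets;

static void stats_add(unsigned long long n)
{
	atomic_fetch_add(&seq, 1);	/* odd: write in progress */
	packets += n;
	atomic_fetch_add(&seq, 1);	/* even again: write done */
}

static unsigned long long stats_read(void)
{
	unsigned int start;
	unsigned long long val;

	do {
		start = atomic_load(&seq);
		val = packets;
	} while ((start & 1) || start != atomic_load(&seq));

	return val;
}

int main(void)
{
	stats_add(1500);
	stats_add(60);
	printf("packets counted: %llu\n", stats_read());
	return 0;
}
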
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 51d564c8742b..1be8c9d83d6a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5924,6 +5924,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
err = ieee80211_lookup_ra_sta(sdata, skb, &sta);
if (err) {
+ dev_kfree_skb(skb);
rcu_read_unlock();
return err;
}
@@ -5938,7 +5939,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
* for MLO STA, the SA should be the AP MLD address, but
* the link ID has been selected already
*/
- if (sta->sta.mlo)
+ if (sta && sta->sta.mlo)
memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
}
rcu_read_unlock();
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index b8ce84618a55..c439125ef2b9 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
switch (mac_cb(skb)->dest.mode) {
case IEEE802154_ADDR_NONE:
- if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
+ if (hdr->source.mode != IEEE802154_ADDR_NONE)
/* FIXME: check if we are PAN coordinator */
skb->pkt_type = PACKET_OTHERHOST;
else
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 35b5f806fdda..b52afe316dc4 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
p = per_cpu_ptr(mdev->stats, i);
do {
- start = u64_stats_fetch_begin(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
local = p->stats;
- } while (u64_stats_fetch_retry(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += local.rx_packets;
stats->rx_bytes += local.rx_bytes;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 8f49e2359a1b..c8a9075ddd0a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1822,7 +1822,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_dp_reset_user_features(skb, info);
}
- goto err_unlock_and_destroy_meters;
+ goto err_destroy_portids;
}
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
@@ -1837,6 +1837,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_notify(&dp_datapath_genl_family, reply, info);
return 0;
+err_destroy_portids:
+ kfree(rcu_dereference_raw(dp->upcall_portids));
err_unlock_and_destroy_meters:
ovs_unlock();
ovs_meters_exit(dp);
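
Editor's note: the openvswitch fix adds a dedicated err_destroy_portids label so that a failure after the upcall portids were allocated frees them before falling through to the existing meter/unlock cleanup. The general goto-unwind idiom, where each label undoes exactly the steps that succeeded before the failure, looks like this in plain C; resource names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Kernel-style error unwinding: each failure jumps to the label that
 * releases exactly what was set up before it, in reverse order. */
static int setup(int fail_late)
{
	char *meters, *portids;

	meters = malloc(64);
	if (!meters)
		return -1;

	portids = malloc(64);
	if (!portids)
		goto err_destroy_meters;

	if (fail_late)			/* e.g. a late reply-building step failing */
		goto err_destroy_portids;

	/* toy example: release here; the real code hands these to the datapath */
	free(portids);
	free(meters);
	return 0;

err_destroy_portids:
	free(portids);
err_destroy_meters:
	free(meters);
	return -1;
}

int main(void)
{
	printf("ok path: %d, failing path: %d\n", setup(0), setup(1));
	return 0;
}
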
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8d25b41d22ec..a9aadc4e6858 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1121,6 +1121,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
}
EXPORT_SYMBOL(dev_graft_qdisc);
+static void shutdown_scheduler_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_qdisc_default)
+{
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc_default = _qdisc_default;
+
+ if (qdisc) {
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+ dev_queue->qdisc_sleeping = qdisc_default;
+
+ qdisc_put(qdisc);
+ }
+}
+
static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
@@ -1168,6 +1183,7 @@ static void attach_default_qdiscs(struct net_device *dev)
if (qdisc == &noop_qdisc) {
netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
default_qdisc_ops->id, noqueue_qdisc_ops.id);
+ netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
dev->priv_flags |= IFF_NO_QUEUE;
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
qdisc = txq->qdisc_sleeping;
@@ -1446,21 +1462,6 @@ void dev_init_scheduler(struct net_device *dev)
timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}
-static void shutdown_scheduler_queue(struct net_device *dev,
- struct netdev_queue *dev_queue,
- void *_qdisc_default)
-{
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
- struct Qdisc *qdisc_default = _qdisc_default;
-
- if (qdisc) {
- rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
- dev_queue->qdisc_sleeping = qdisc_default;
-
- qdisc_put(qdisc);
- }
-}
-
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index d0288e223542..e031c1a41ea6 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -354,6 +354,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
struct nlattr *tb[TCA_TBF_MAX + 1];
struct tc_tbf_qopt *qopt;
struct Qdisc *child = NULL;
+ struct Qdisc *old = NULL;
struct psched_ratecfg rate;
struct psched_ratecfg peak;
u64 max_size;
@@ -445,7 +446,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
if (child) {
qdisc_tree_flush_backlog(q->qdisc);
- qdisc_put(q->qdisc);
+ old = q->qdisc;
q->qdisc = child;
}
q->limit = qopt->limit;
@@ -465,6 +466,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
sch_tree_unlock(sch);
+ qdisc_put(old);
err = 0;
tbf_offload_change(sch);
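
Editor's note: the tbf change stops releasing the old child qdisc while sch_tree_lock() is held; it is only detached under the lock and qdisc_put() runs after sch_tree_unlock(). The same "swap under lock, free after unlock" pattern in userspace terms, with a pthread mutex standing in for the qdisc tree lock and strings standing in for qdiscs:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static char *current_child;

/* Replace the active object under the lock, but defer the (potentially
 * expensive) destruction until after the lock is dropped. */
static void replace_child(const char *name)
{
	char *old, *new_child = strdup(name);

	pthread_mutex_lock(&tree_lock);
	old = current_child;		/* detach only; no free under the lock */
	current_child = new_child;
	pthread_mutex_unlock(&tree_lock);

	free(old);			/* safe: nothing can reach it any more */
}

int main(void)
{
	replace_child("bfifo");
	replace_child("codel");
	printf("active child: %s\n", current_child);
	free(current_child);
	return 0;
}
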
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 79c1318af1fe..0939cc3b915a 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1855,7 +1855,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
{
struct sock *newsmcsk = &new_smc->sk;
- sk_refcnt_debug_inc(newsmcsk);
if (newsmcsk->sk_state == SMC_INIT)
newsmcsk->sk_state = SMC_ACTIVE;
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index aab43469a2f0..0878b162890a 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file,
{
struct wiphy *wiphy = file->private_data;
char *buf;
- unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
+ unsigned int offset = 0, buf_size = PAGE_SIZE, i;
enum nl80211_band band;
struct ieee80211_supported_band *sband;
+ ssize_t r;
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
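
Editor's note: the wireless/debugfs fix changes r from unsigned int to ssize_t because the function ultimately returns the result of simple_read_from_buffer(), which may be a negative errno; storing that in an unsigned int turns the error into a huge positive byte count. A tiny demonstration of the truncation, with EFAULT_ERR as a local stand-in for the errno value:

#include <stdio.h>
#include <sys/types.h>

#define EFAULT_ERR 14	/* stand-in for EFAULT */

static ssize_t read_helper(void)
{
	return -EFAULT_ERR;	/* the kind of value simple_read_from_buffer() can return */
}

int main(void)
{
	unsigned int r_bad = read_helper();	/* old type: sign is lost */
	ssize_t r_good = read_helper();		/* new type: error preserved */

	printf("as unsigned int: %zd, as ssize_t: %zd\n",
	       (ssize_t)r_bad, r_good);
	return 0;
}
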
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index f70112176b7c..a71a8c6edf55 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -379,6 +379,16 @@ static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
+ if (!pool->unaligned) {
+ u32 i;
+
+ for (i = 0; i < pool->heads_cnt; i++) {
+ struct xdp_buff_xsk *xskb = &pool->heads[i];
+
+ xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
+ }
+ }
+
pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
if (!pool->dma_pages)
return -ENOMEM;
@@ -428,12 +438,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
if (pool->unaligned)
xp_check_dma_contiguity(dma_map);
- else
- for (i = 0; i < pool->heads_cnt; i++) {
- struct xdp_buff_xsk *xskb = &pool->heads[i];
-
- xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
- }
err = xp_init_dma_info(pool, dma_map);
if (err) {