Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan_dev.c10
-rw-r--r--net/8021q/vlan_netlink.c7
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/ax25/ax25_ds_timer.c5
-rw-r--r--net/ax25/ax25_std_timer.c5
-rw-r--r--net/ax25/ax25_subr.c3
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c116
-rw-r--r--net/batman-adv/distributed-arp-table.c10
-rw-r--r--net/batman-adv/originator.c15
-rw-r--r--net/batman-adv/routing.c53
-rw-r--r--net/batman-adv/send.c4
-rw-r--r--net/batman-adv/soft-interface.c9
-rw-r--r--net/batman-adv/translation-table.c50
-rw-r--r--net/batman-adv/types.h8
-rw-r--r--net/bridge/br_input.c15
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h23
-rw-r--r--net/ceph/osdmap.c156
-rw-r--r--net/core/filter.c26
-rw-r--r--net/core/flow_dissector.c43
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/skbuff.c18
-rw-r--r--net/core/sock.c11
-rw-r--r--net/dccp/ipv4.c12
-rw-r--r--net/dccp/ipv6.c2
-rw-r--r--net/decnet/dn_fib.c21
-rw-r--r--net/ipv4/esp4.c52
-rw-r--r--net/ipv4/fib_semantics.c6
-rw-r--r--net/ipv4/gre_demux.c10
-rw-r--r--net/ipv4/ip_gre.c26
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ipconfig.c4
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/tcp_input.c54
-rw-r--r--net/ipv4/tcp_output.c7
-rw-r--r--net/ipv4/udp.c82
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_checksum.c7
-rw-r--r--net/ipv6/ip6_fib.c1
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6mr.c1
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sit.c4
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/ipv6/udp.c73
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c8
-rw-r--r--net/kcm/kcmproc.c1
-rw-r--r--net/mac80211/mesh.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c10
-rw-r--r--net/netfilter/nf_tables_api.c28
-rw-r--r--net/netfilter/nf_tables_core.c2
-rw-r--r--net/netfilter/nft_ct.c6
-rw-r--r--net/netfilter/nft_hash.c3
-rw-r--r--net/netfilter/nft_meta.c2
-rw-r--r--net/netfilter/nft_rbtree.c3
-rw-r--r--net/openvswitch/conntrack.c14
-rw-r--r--net/packet/af_packet.c12
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/rds/loop.c5
-rw-r--r--net/rds/sysctl.c3
-rw-r--r--net/rds/tcp.c5
-rw-r--r--net/rds/tcp.h2
-rw-r--r--net/rds/tcp_connect.c26
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rds/tcp_recv.c2
-rw-r--r--net/rds/tcp_send.c14
-rw-r--r--net/rds/transport.c3
-rw-r--r--net/rose/rose_in.c3
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_ife.c53
-rw-r--r--net/sched/act_ipt.c7
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/sch_fifo.c4
-rw-r--r--net/sched/sch_htb.c8
-rw-r--r--net/sched/sch_netem.c12
-rw-r--r--net/sched/sch_prio.c67
-rw-r--r--net/sctp/input.c5
-rw-r--r--net/sctp/sctp_diag.c6
-rw-r--r--net/sunrpc/clnt.c31
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--net/tipc/bearer.c17
-rw-r--r--net/tipc/bearer.h1
-rw-r--r--net/tipc/link.c12
-rw-r--r--net/tipc/msg.c6
-rw-r--r--net/tipc/msg.h11
-rw-r--r--net/tipc/netlink_compat.c2
-rw-r--r--net/tipc/node.c15
-rw-r--r--net/tipc/socket.c54
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/vmw_vsock/af_vsock.c12
-rw-r--r--net/wireless/nl80211.c8
-rw-r--r--net/wireless/util.c4
96 files changed, 946 insertions, 555 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 86ae75b77390..516b0e73263c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -146,10 +146,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
- /* TODO: gotta make sure the underlying layer can handle it,
- * maybe an IFF_VLAN_CAPABLE flag for devices?
- */
- if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ unsigned int max_mtu = real_dev->mtu;
+
+ if (netif_reduces_vlan_mtu(real_dev))
+ max_mtu -= VLAN_HLEN;
+ if (max_mtu < new_mtu)
return -ERANGE;
dev->mtu = new_mtu;
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index c92b52f37d38..1270207f3d7c 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -118,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev;
+ unsigned int max_mtu;
__be16 proto;
int err;
@@ -144,9 +145,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
+ max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+ real_dev->mtu;
if (!tb[IFLA_MTU])
- dev->mtu = real_dev->mtu;
- else if (dev->mtu > real_dev->mtu)
+ dev->mtu = max_mtu;
+ else if (dev->mtu > max_mtu)
return -EINVAL;
err = vlan_changelink(dev, tb, data);
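
Both VLAN hunks enforce the same rule: a VLAN device's MTU may not exceed the real device's MTU, reduced by VLAN_HLEN (the 4-byte 802.1Q tag) when netif_reduces_vlan_mtu() reports that the underlying device counts the tag against its MTU. A minimal userspace sketch of that clamp, with the kernel predicate modeled as a plain boolean:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN 4	/* 802.1Q tag: 2-byte TPID + 2-byte TCI */

/* Models the ceiling used by vlan_dev_change_mtu()/vlan_newlink(). */
static int vlan_max_mtu(int real_dev_mtu, bool reduces_vlan_mtu)
{
	return reduces_vlan_mtu ? real_dev_mtu - VLAN_HLEN : real_dev_mtu;
}

int main(void)
{
	/* hypothetical 1500-byte device that counts the tag against its MTU */
	printf("%d\n", vlan_max_mtu(1500, true));	/* prints 1496 */
	return 0;
}
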
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index fbd0acf80b13..2fdebabbfacd 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock)
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
- ax25_destroy_socket(ax25);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_destroy_socket(ax25);
break;
case AX25_STATE_3:
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 951cd57bb07d..5237dff6941d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
switch (ax25->state) {
case AX25_STATE_0:
+ case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
+ /* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
@@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
- ax25_disconnect(ax25, ETIMEDOUT);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 004467c9e6e1..2c0d6ef66f9d 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
switch (ax25->state) {
case AX25_STATE_0:
+ case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
+ /* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
@@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25)
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
- ax25_disconnect(ax25, ETIMEDOUT);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 3b78e8473a01..655a7d4c96e1 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
- ax25_stop_heartbeat(ax25);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 748a9ead7ce5..825a5cdf4382 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -177,10 +177,21 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
static void batadv_claim_release(struct kref *ref)
{
struct batadv_bla_claim *claim;
+ struct batadv_bla_backbone_gw *old_backbone_gw;
claim = container_of(ref, struct batadv_bla_claim, refcount);
- batadv_backbone_gw_put(claim->backbone_gw);
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
+ claim->backbone_gw = NULL;
+ spin_unlock_bh(&claim->backbone_lock);
+
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
kfree_rcu(claim, rcu);
}
@@ -418,9 +429,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
break;
}
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb)
+ goto out;
+ }
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface);
@@ -674,8 +688,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid,
struct batadv_bla_backbone_gw *backbone_gw)
{
+ struct batadv_bla_backbone_gw *old_backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_bla_claim search_claim;
+ bool remove_crc = false;
int hash_added;
ether_addr_copy(search_claim.addr, mac);
@@ -689,8 +705,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
return;
ether_addr_copy(claim->addr, mac);
+ spin_lock_init(&claim->backbone_lock);
claim->vid = vid;
claim->lasttime = jiffies;
+ kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
kref_init(&claim->refcount);
@@ -718,15 +736,26 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
"bla_add_claim(): changing ownership for %pM, vid %d\n",
mac, BATADV_PRINT_VID(vid));
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
- batadv_backbone_gw_put(claim->backbone_gw);
+ remove_crc = true;
}
- /* set (new) backbone gw */
+
+ /* replace backbone_gw atomically and adjust reference counters */
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
+ spin_unlock_bh(&claim->backbone_lock);
+ if (remove_crc) {
+ /* remove claim address from old backbone_gw */
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+ }
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
+ /* add claim address to new backbone_gw */
spin_lock_bh(&backbone_gw->crc_lock);
backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&backbone_gw->crc_lock);
@@ -737,6 +766,26 @@ claim_free_ref:
}
/**
+ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ spin_lock_bh(&claim->backbone_lock);
+ backbone_gw = claim->backbone_gw;
+ kref_get(&backbone_gw->refcount);
+ spin_unlock_bh(&claim->backbone_lock);
+
+ return backbone_gw;
+}
+
+/**
* batadv_bla_del_claim - delete a claim from the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: mac address of the claim to be removed
@@ -760,10 +809,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_choose_claim, claim);
batadv_claim_put(claim); /* reference from the hash is gone */
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
-
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
@@ -1216,6 +1261,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
int now)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct hlist_head *head;
struct batadv_hashtable *hash;
@@ -1230,14 +1276,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
if (now)
goto purge_now;
- if (!batadv_compare_eth(claim->backbone_gw->orig,
+
+ if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
- continue;
+ goto skip;
+
if (!batadv_has_timed_out(claim->lasttime,
BATADV_BLA_CLAIM_TIMEOUT))
- continue;
+ goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_purge_claims(): %pM, vid %d, time out\n",
@@ -1245,8 +1294,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
purge_now:
batadv_handle_unclaim(bat_priv, primary_if,
- claim->backbone_gw->orig,
+ backbone_gw->orig,
claim->addr, claim->vid);
+skip:
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
@@ -1757,9 +1808,11 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, bool is_bcast)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
+ bool own_claim;
bool ret;
ethhdr = eth_hdr(skb);
@@ -1794,8 +1847,12 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
/* if it is our own claim ... */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ own_claim = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (own_claim) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
@@ -1859,7 +1916,9 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
{
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
+ bool client_roamed;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1889,8 +1948,12 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
goto allow;
/* check if we are responsible. */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ client_roamed = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (client_roamed) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
@@ -1938,6 +2001,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
@@ -1962,17 +2026,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+
+ is_own = batadv_compare_eth(backbone_gw->orig,
primary_addr);
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- backbone_crc = claim->backbone_gw->crc;
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_crc = backbone_gw->crc;
+ spin_unlock_bh(&backbone_gw->crc_lock);
seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
claim->addr, BATADV_PRINT_VID(claim->vid),
- claim->backbone_gw->orig,
+ backbone_gw->orig,
(is_own ? 'x' : ' '),
backbone_crc);
+
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
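
The claim rework above pairs a new backbone_lock spinlock with the existing kref so that claim->backbone_gw can be swapped safely: every reader takes the lock, grabs a counted reference, and drops the lock before dereferencing. A self-contained userspace model of that getter/put pattern (pthread mutex and C11 atomics standing in for spinlock_t and struct kref; names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct backbone_gw {
	atomic_int refcount;		/* stands in for struct kref */
};

struct claim {
	struct backbone_gw *backbone_gw;
	pthread_mutex_t backbone_lock;	/* stands in for spinlock_t */
};

/* Mirrors the shape of batadv_bla_claim_get_backbone_gw(): hold the
 * lock so the pointer cannot be replaced mid-read, take a reference,
 * then unlock.  The caller must put the reference when done.
 */
static struct backbone_gw *claim_get_backbone_gw(struct claim *c)
{
	struct backbone_gw *gw;

	pthread_mutex_lock(&c->backbone_lock);
	gw = c->backbone_gw;
	atomic_fetch_add(&gw->refcount, 1);
	pthread_mutex_unlock(&c->backbone_lock);
	return gw;
}

static void backbone_gw_put(struct backbone_gw *gw)
{
	if (atomic_fetch_sub(&gw->refcount, 1) == 1)
		free(gw);		/* last reference gone */
}
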
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 278800a99c69..aee3b3991471 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1009,9 +1009,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
if (!skb_new)
goto out;
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb_new)
+ goto out;
+ }
skb_reset_mac_header(skb_new);
skb_new->protocol = eth_type_trans(skb_new,
@@ -1089,9 +1092,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
*/
skb_reset_mac_header(skb_new);
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb_new)
+ goto out;
+ }
/* To preserve backwards compatibility, the node has choose the outgoing
* format based on the incoming request packet type. The assumption is
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 7f51bc2c06eb..ab8c4f9738fe 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -765,6 +765,8 @@ static void batadv_orig_node_release(struct kref *ref)
struct batadv_neigh_node *neigh_node;
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_orig_node_vlan *vlan;
+ struct batadv_orig_ifinfo *last_candidate;
orig_node = container_of(ref, struct batadv_orig_node, refcount);
@@ -782,8 +784,21 @@ static void batadv_orig_node_release(struct kref *ref)
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_put(orig_ifinfo);
}
+
+ last_candidate = orig_node->last_bonding_candidate;
+ orig_node->last_bonding_candidate = NULL;
spin_unlock_bh(&orig_node->neigh_list_lock);
+ if (last_candidate)
+ batadv_orig_ifinfo_put(last_candidate);
+
+ spin_lock_bh(&orig_node->vlan_list_lock);
+ hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
+ hlist_del_rcu(&vlan->list);
+ batadv_orig_node_vlan_put(vlan);
+ }
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index e3857ed4057f..bfac086b4d01 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -374,6 +374,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
+ ethhdr = eth_hdr(skb);
icmph = (struct batadv_icmp_header *)skb->data;
icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
@@ -455,6 +456,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
}
/**
+ * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * @orig_node: originator node whose bonding candidates should be replaced
+ * @new_candidate: new bonding candidate or NULL
+ */
+static void
+batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
+ struct batadv_orig_ifinfo *new_candidate)
+{
+ struct batadv_orig_ifinfo *old_candidate;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ old_candidate = orig_node->last_bonding_candidate;
+
+ if (new_candidate)
+ kref_get(&new_candidate->refcount);
+ orig_node->last_bonding_candidate = new_candidate;
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ if (old_candidate)
+ batadv_orig_ifinfo_put(old_candidate);
+}
+
+/**
* batadv_find_router - find a suitable router for this originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the destination node
@@ -561,10 +585,6 @@ next:
}
rcu_read_unlock();
- /* last_bonding_candidate is reset below, remove the old reference. */
- if (orig_node->last_bonding_candidate)
- batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);
-
/* After finding candidates, handle the three cases:
* 1) there is a next candidate, use that
* 2) there is no next candidate, use the first of the list
@@ -573,21 +593,28 @@ next:
if (next_candidate) {
batadv_neigh_node_put(router);
- /* remove references to first candidate, we don't need it. */
- if (first_candidate) {
- batadv_neigh_node_put(first_candidate_router);
- batadv_orig_ifinfo_put(first_candidate);
- }
+ kref_get(&next_candidate_router->refcount);
router = next_candidate_router;
- orig_node->last_bonding_candidate = next_candidate;
+ batadv_last_bonding_replace(orig_node, next_candidate);
} else if (first_candidate) {
batadv_neigh_node_put(router);
- /* refcounting has already been done in the loop above. */
+ kref_get(&first_candidate_router->refcount);
router = first_candidate_router;
- orig_node->last_bonding_candidate = first_candidate;
+ batadv_last_bonding_replace(orig_node, first_candidate);
} else {
- orig_node->last_bonding_candidate = NULL;
+ batadv_last_bonding_replace(orig_node, NULL);
+ }
+
+ /* cleanup of candidates */
+ if (first_candidate) {
+ batadv_neigh_node_put(first_candidate_router);
+ batadv_orig_ifinfo_put(first_candidate);
+ }
+
+ if (next_candidate) {
+ batadv_neigh_node_put(next_candidate_router);
+ batadv_orig_ifinfo_put(next_candidate);
}
return router;
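
batadv_last_bonding_replace() is the write side of the same discipline: publish the new pointer under neigh_list_lock with its reference already taken, and drop the old reference only after unlocking, so the release callback can never run while the spinlock is held. A sketch reusing the illustrative types from the claim sketch above:

/* Mirrors the shape of batadv_last_bonding_replace(); new_gw may be
 * NULL.  The old reference is put outside the lock because the final
 * put may free the object.
 */
static void claim_replace_backbone_gw(struct claim *c,
				      struct backbone_gw *new_gw)
{
	struct backbone_gw *old;

	pthread_mutex_lock(&c->backbone_lock);
	old = c->backbone_gw;
	if (new_gw)
		atomic_fetch_add(&new_gw->refcount, 1);
	c->backbone_gw = new_gw;
	pthread_mutex_unlock(&c->backbone_lock);

	if (old)
		backbone_gw_put(old);
}
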
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f2f125684ed9..010397650fa5 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -424,8 +424,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node *orig_node;
orig_node = batadv_gw_get_selected_orig(bat_priv);
- return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
- orig_node, vid);
+ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+ BATADV_P_DATA, orig_node, vid);
}
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 343d2c904399..287a3879ed7e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1033,7 +1033,9 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
struct list_head *head)
{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_hard_iface *hard_iface;
+ struct batadv_softif_vlan *vlan;
list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface == soft_iface)
@@ -1041,6 +1043,13 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
BATADV_IF_CLEANUP_KEEP);
}
+ /* destroy the "untagged" VLAN */
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (vlan) {
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+ batadv_softif_vlan_put(vlan);
+ }
+
batadv_sysfs_del_meshif(soft_iface);
unregister_netdevice_queue(soft_iface, head);
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index feaf492b01ca..57ec87f37050 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -650,8 +650,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
- if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
- addr, BATADV_PRINT_VID(vid))) {
+ if (!vlan) {
+ net_ratelimited_function(batadv_info, soft_iface,
+ "adding TT local entry %pM to non-existent VLAN %d\n",
+ addr, BATADV_PRINT_VID(vid));
kfree(tt_local);
tt_local = NULL;
goto out;
@@ -691,7 +693,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_local_entry_put(tt_local);
- batadv_softif_vlan_put(vlan);
goto out;
}
@@ -2269,6 +2270,29 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
return crc;
}
+/**
+ * batadv_tt_req_node_release - free tt_req node entry
+ * @ref: kref pointer of the tt req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+ struct batadv_tt_req_node *tt_req_node;
+
+ tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+ kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * possibly release it
+ * @tt_req_node: tt_req_node to be free'd
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+ kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node;
@@ -2278,7 +2302,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2315,7 +2339,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2347,9 +2371,11 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
if (!tt_req_node)
goto unlock;
+ kref_init(&tt_req_node->refcount);
ether_addr_copy(tt_req_node->addr, orig_node->orig);
tt_req_node->issued_at = jiffies;
+ kref_get(&tt_req_node->refcount);
hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2613,13 +2639,19 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
out:
if (primary_if)
batadv_hardif_put(primary_if);
+
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
- /* hlist_del_init() verifies tt_req_node still is in the list */
- hlist_del_init(&tt_req_node->list);
+ if (!hlist_unhashed(&tt_req_node->list)) {
+ hlist_del_init(&tt_req_node->list);
+ batadv_tt_req_node_put(tt_req_node);
+ }
spin_unlock_bh(&bat_priv->tt.req_list_lock);
- kfree(tt_req_node);
}
+
+ if (tt_req_node)
+ batadv_tt_req_node_put(tt_req_node);
+
kfree(tvlv_tt_data);
return ret;
}
@@ -3055,7 +3087,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
if (!batadv_compare_eth(node->addr, resp_src))
continue;
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
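
The tt_req_node conversion above follows the standard kref ownership split: kref_init() gives the allocating caller its own reference, a second kref_get() accompanies insertion into tt.req_list, and every unhash path pairs hlist_del_init() with a put, so a node that batadv_handle_tt_response() removes concurrently costs one reference instead of triggering a double kfree(). A minimal userspace model of that split (illustrative names, C11 atomics for struct kref):

#include <stdatomic.h>
#include <stdlib.h>

struct req_node {
	atomic_int refcount;	/* stands in for struct kref */
	int on_list;		/* stands in for !hlist_unhashed() */
};

static struct req_node *req_node_new(void)
{
	struct req_node *n = calloc(1, sizeof(*n));

	if (n)
		atomic_store(&n->refcount, 1);	/* caller's reference */
	return n;
}

static void req_node_put(struct req_node *n)
{
	if (atomic_fetch_sub(&n->refcount, 1) == 1)
		free(n);
}

static void req_list_add(struct req_node *n)
{
	atomic_fetch_add(&n->refcount, 1);	/* list's own reference */
	n->on_list = 1;
}

static void req_list_del(struct req_node *n)
{
	if (n->on_list) {			/* unhash exactly once */
		n->on_list = 0;
		req_node_put(n);
	}
}
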
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 6a577f4f8ba7..74d865a4df46 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -330,7 +330,9 @@ struct batadv_orig_node {
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
u32 last_bcast_seqno;
struct hlist_head neigh_list;
- /* neigh_list_lock protects: neigh_list and router */
+ /* neigh_list_lock protects: neigh_list, ifinfo_list,
+ * last_bonding_candidate and router
+ */
spinlock_t neigh_list_lock;
struct hlist_node hash_entry;
struct batadv_priv *bat_priv;
@@ -1042,6 +1044,7 @@ struct batadv_bla_backbone_gw {
* @addr: mac address of claimed non-mesh client
* @vid: vlan id this client was detected on
* @backbone_gw: pointer to backbone gw claiming this client
+ * @backbone_lock: lock protecting backbone_gw pointer
* @lasttime: last time we heard of claim (locals only)
* @hash_entry: hlist node for batadv_priv_bla::claim_hash
* @refcount: number of contexts the object is used
@@ -1051,6 +1054,7 @@ struct batadv_bla_claim {
u8 addr[ETH_ALEN];
unsigned short vid;
struct batadv_bla_backbone_gw *backbone_gw;
+ spinlock_t backbone_lock; /* protects backbone_gw */
unsigned long lasttime;
struct hlist_node hash_entry;
struct rcu_head rcu;
@@ -1137,11 +1141,13 @@ struct batadv_tt_change_node {
* struct batadv_tt_req_node - data to keep track of the tt requests in flight
* @addr: mac address of the originator this request was sent to
* @issued_at: timestamp used for purging stale tt requests
+ * @refcount: number of contexts the object is used by
* @list: list node for batadv_priv_tt::req_list
*/
struct batadv_tt_req_node {
u8 addr[ETH_ALEN];
unsigned long issued_at;
+ struct kref refcount;
struct hlist_node list;
};
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 160797722228..43d2cd862bc2 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -213,8 +213,7 @@ drop:
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
u16 vid = 0;
@@ -222,6 +221,14 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_bu
/* check if vlan is allowed, to avoid spoofing */
if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+ __br_handle_local_finish(skb);
BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
br_pass_frame_up(skb);
@@ -274,7 +281,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
if (p->br->stp_enabled == BR_NO_STP ||
fwd_mask & (1u << dest[5]))
goto forward;
- break;
+ *pskb = skb;
+ __br_handle_local_finish(skb);
+ return RX_HANDLER_PASS;
case 0x01: /* IEEE MAC (Pause) */
goto drop;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6852f3c7009c..43844144c9c4 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
&ip6h->saddr)) {
kfree_skb(skb);
+ br->has_ipv6_addr = 0;
return NULL;
}
+
+ br->has_ipv6_addr = 1;
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
@@ -1745,6 +1748,7 @@ void br_multicast_init(struct net_bridge *br)
br->ip6_other_query.delay_time = 0;
br->ip6_querier.port = NULL;
#endif
+ br->has_ipv6_addr = 1;
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 2d25979273a6..77e7f69bf80d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -700,7 +700,7 @@ static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
- unsigned int mtu = ip_skb_dst_mtu(skb);
+ unsigned int mtu = ip_skb_dst_mtu(sk, skb);
struct iphdr *iph = ip_hdr(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a5343c7232bf..85e89f693589 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1273,7 +1273,7 @@ static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
struct bridge_vlan_xstats vxi;
struct br_vlan_stats stats;
- if (vl_idx++ < *prividx)
+ if (++vl_idx < *prividx)
continue;
memset(&vxi, 0, sizeof(vxi));
vxi.vid = v->vid;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c7fb5d7a7218..52edecf3c294 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -314,6 +314,7 @@ struct net_bridge
u8 multicast_disabled:1;
u8 multicast_querier:1;
u8 multicast_query_use_ifaddr:1;
+ u8 has_ipv6_addr:1;
u32 hash_elasticity;
u32 hash_max;
@@ -588,10 +589,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
static inline bool
__br_multicast_querier_exists(struct net_bridge *br,
- struct bridge_mcast_other_query *querier)
+ struct bridge_mcast_other_query *querier,
+ const bool is_ipv6)
{
+ bool own_querier_enabled;
+
+ if (br->multicast_querier) {
+ if (is_ipv6 && !br->has_ipv6_addr)
+ own_querier_enabled = false;
+ else
+ own_querier_enabled = true;
+ } else {
+ own_querier_enabled = false;
+ }
+
return time_is_before_jiffies(querier->delay_time) &&
- (br->multicast_querier || timer_pending(&querier->timer));
+ (own_querier_enabled || timer_pending(&querier->timer));
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
@@ -599,10 +612,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
- return __br_multicast_querier_exists(br, &br->ip4_other_query);
+ return __br_multicast_querier_exists(br,
+ &br->ip4_other_query, false);
#if IS_ENABLED(CONFIG_IPV6)
case (htons(ETH_P_IPV6)):
- return __br_multicast_querier_exists(br, &br->ip6_other_query);
+ return __br_multicast_querier_exists(br,
+ &br->ip6_other_query, true);
#endif
default:
return false;
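
The querier predicate above boils down to simple boolean logic: the bridge's own querier only counts for an IPv6 query when the bridge actually holds an IPv6 address to source queries from (the new has_ipv6_addr bit, cleared when ipv6_dev_get_saddr() fails). A compact model of the decision:

#include <stdbool.h>

/* Models __br_multicast_querier_exists() after the change. */
static bool querier_exists(bool delay_elapsed, bool own_querier,
			   bool is_ipv6, bool has_ipv6_addr,
			   bool other_timer_pending)
{
	bool own_enabled = own_querier && !(is_ipv6 && !has_ipv6_addr);

	return delay_elapsed && (own_enabled || other_timer_pending);
}
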
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 03062bb763b3..7e480bf75bcf 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1261,6 +1261,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
}
/*
+ * Encoding order is (new_up_client, new_state, new_weight). Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ * new_up_client: { osd=6, addr=... } # set osd_state and addr
+ * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+ struct ceph_osdmap *map)
+{
+ void *new_up_client;
+ void *new_state;
+ void *new_weight_end;
+ u32 len;
+
+ new_up_client = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ new_state = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(u8);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ /* new_weight */
+ ceph_decode_32_safe(p, end, len, e_inval);
+ while (len--) {
+ s32 osd;
+ u32 w;
+
+ ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+ osd = ceph_decode_32(p);
+ w = ceph_decode_32(p);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d weight 0x%x %s\n", osd, w,
+ w == CEPH_OSD_IN ? "(in)" :
+ (w == CEPH_OSD_OUT ? "(out)" : ""));
+ map->osd_weight[osd] = w;
+
+ /*
+ * If we are marking in, set the EXISTS, and clear the
+ * AUTOOUT and NEW bits.
+ */
+ if (w) {
+ map->osd_state[osd] |= CEPH_OSD_EXISTS;
+ map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+ CEPH_OSD_NEW);
+ }
+ }
+ new_weight_end = *p;
+
+ /* new_state (up/down) */
+ *p = new_state;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ u8 xorstate;
+ int ret;
+
+ osd = ceph_decode_32(p);
+ xorstate = ceph_decode_8(p);
+ if (xorstate == 0)
+ xorstate = CEPH_OSD_UP;
+ BUG_ON(osd >= map->max_osd);
+ if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+ (xorstate & CEPH_OSD_UP))
+ pr_info("osd%d down\n", osd);
+ if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+ (xorstate & CEPH_OSD_EXISTS)) {
+ pr_info("osd%d does not exist\n", osd);
+ map->osd_weight[osd] = CEPH_OSD_IN;
+ ret = set_primary_affinity(map, osd,
+ CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+ if (ret)
+ return ret;
+ memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+ map->osd_state[osd] = 0;
+ } else {
+ map->osd_state[osd] ^= xorstate;
+ }
+ }
+
+ /* new_up_client */
+ *p = new_up_client;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ struct ceph_entity_addr addr;
+
+ osd = ceph_decode_32(p);
+ ceph_decode_copy(p, &addr, sizeof(addr));
+ ceph_decode_addr(&addr);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d up\n", osd);
+ map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+ map->osd_addr[osd] = addr;
+ }
+
+ *p = new_weight_end;
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
+
+/*
* decode and apply an incremental map update.
*/
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
@@ -1358,49 +1467,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
__remove_pg_pool(&map->pg_pools, pi);
}
- /* new_up */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- struct ceph_entity_addr addr;
- ceph_decode_32_safe(p, end, osd, e_inval);
- ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
- ceph_decode_addr(&addr);
- pr_info("osd%d up\n", osd);
- BUG_ON(osd >= map->max_osd);
- map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
- map->osd_addr[osd] = addr;
- }
-
- /* new_state */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- u8 xorstate;
- ceph_decode_32_safe(p, end, osd, e_inval);
- xorstate = **(u8 **)p;
- (*p)++; /* clean flag */
- if (xorstate == 0)
- xorstate = CEPH_OSD_UP;
- if (xorstate & CEPH_OSD_UP)
- pr_info("osd%d down\n", osd);
- if (osd < map->max_osd)
- map->osd_state[osd] ^= xorstate;
- }
-
- /* new_weight */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd, off;
- ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
- osd = ceph_decode_32(p);
- off = ceph_decode_32(p);
- pr_info("osd%d weight 0x%x %s\n", osd, off,
- off == CEPH_OSD_IN ? "(in)" :
- (off == CEPH_OSD_OUT ? "(out)" : ""));
- if (osd < map->max_osd)
- map->osd_weight[osd] = off;
- }
+ /* new_up_client, new_state, new_weight */
+ err = decode_new_up_state_weight(p, end, map);
+ if (err)
+ goto bad;
/* new_pg_temp */
err = decode_new_pg_temp(p, end, map);
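
decode_new_up_state_weight() shows a general two-pass pattern for sections that must be applied out of encoding order: remember where each section starts, bounds-check and skip it, apply the sections in the required order, then leave the cursor at the end of the last section in stream order. A minimal sketch of the skip step over an invented record format (a u32 count followed by fixed-size records; this is not the ceph wire format):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct cursor {
	const uint8_t *p, *end;
};

/* Records the section's start and advances past it, refusing to run
 * off the buffer -- the moral equivalent of ceph_decode_need() plus
 * "*p += len" in decode_new_up_state_weight().
 */
static int skip_section(struct cursor *c, size_t rec_size,
			const uint8_t **start)
{
	uint32_t n;

	if ((size_t)(c->end - c->p) < sizeof(n))
		return -1;
	*start = c->p;
	memcpy(&n, c->p, sizeof(n));
	c->p += sizeof(n);
	if ((size_t)(c->end - c->p) / rec_size < n)
		return -1;
	c->p += (size_t)n * rec_size;
	return 0;
}
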
diff --git a/net/core/filter.c b/net/core/filter.c
index 68adb5f52110..e759d90e8cef 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -53,9 +53,10 @@
#include <net/sock_reuseport.h>
/**
- * sk_filter - run a packet through a socket filter
+ * sk_filter_trim_cap - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
+ * @cap: limit on how short the eBPF program may trim the packet
*
* Run the eBPF program and then cut skb->data to correct size returned by
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
@@ -64,7 +65,7 @@
* be accepted or -EPERM if the packet should be tossed.
*
*/
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
int err;
struct sk_filter *filter;
@@ -85,14 +86,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
-
- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+ err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();
return err;
}
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
@@ -2085,7 +2085,8 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
}
static bool sk_filter_is_valid_access(int off, int size,
- enum bpf_access_type type)
+ enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
switch (off) {
case offsetof(struct __sk_buff, tc_classid):
@@ -2108,7 +2109,8 @@ static bool sk_filter_is_valid_access(int off, int size,
}
static bool tc_cls_act_is_valid_access(int off, int size,
- enum bpf_access_type type)
+ enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
if (type == BPF_WRITE) {
switch (off) {
@@ -2123,6 +2125,16 @@ static bool tc_cls_act_is_valid_access(int off, int size,
return false;
}
}
+
+ switch (off) {
+ case offsetof(struct __sk_buff, data):
+ *reg_type = PTR_TO_PACKET;
+ break;
+ case offsetof(struct __sk_buff, data_end):
+ *reg_type = PTR_TO_PACKET_END;
+ break;
+ }
+
return __is_valid_access(off, size, type);
}
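
sk_filter_trim_cap() changes the trim semantics in one place: a return of 0 from the BPF program still drops the packet (-EPERM), but any nonzero length is clamped so the program can never trim below cap, the minimum header length the caller still has to parse (DCCP passes dccph_doff * 4 below). The clamp itself:

#include <stddef.h>

/* Models the pskb_trim(skb, max(cap, pkt_len)) call: honour the
 * filter's verdict, but keep at least `cap` bytes.  pkt_len == 0
 * (drop) is handled before this point.
 */
static size_t filter_trim_len(size_t pkt_len, size_t cap)
{
	return pkt_len > cap ? pkt_len : cap;
}
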
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a669dea146c6..61ad43f61c5e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
}
EXPORT_SYMBOL(make_flow_keys_digest);
+static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
+
+u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+{
+ struct flow_keys keys;
+
+ __flow_hash_secret_init();
+
+ memset(&keys, 0, sizeof(keys));
+ __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
+ NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+ return __flow_hash_from_keys(&keys, hashrnd);
+}
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+
/**
* __skb_get_hash: calculate a flow hash
* @skb: sk_buff to calculate flow hash from
@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
},
};
+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v4addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v6addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_PORTS,
+ .offset = offsetof(struct flow_keys, ports),
+ },
+};
+
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
skb_flow_dissector_init(&flow_keys_dissector,
flow_keys_dissector_keys,
ARRAY_SIZE(flow_keys_dissector_keys));
+ skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+ flow_keys_dissector_symmetric_keys,
+ ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
skb_flow_dissector_init(&flow_keys_buf_dissector,
flow_keys_buf_dissector_keys,
ARRAY_SIZE(flow_keys_buf_dissector_keys));
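
The symmetric dissector restricts the extracted keys to control, basic, addresses and ports, so that together with the endpoint canonicalization already done in __flow_hash_from_keys() the hash comes out identical for both directions of a flow. Sketching just the canonicalization idea for IPv4 (a simplification, not the kernel's hash function):

#include <stdint.h>

struct flow4 {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

/* Order the endpoints before mixing so A->B and B->A produce the
 * same key -- the property __skb_get_hash_symmetric() needs.
 */
static uint64_t symmetric_key(struct flow4 f)
{
	if (f.saddr > f.daddr ||
	    (f.saddr == f.daddr && f.sport > f.dport)) {
		uint32_t a = f.saddr; f.saddr = f.daddr; f.daddr = a;
		uint16_t p = f.sport; f.sport = f.dport; f.dport = p;
	}
	return ((uint64_t)f.saddr << 32 | f.daddr) ^
	       ((uint64_t)f.sport << 16 | f.dport);
}
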
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 29dd8cc22bbf..510cd62fcb99 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2469,13 +2469,17 @@ int neigh_xmit(int index, struct net_device *dev,
tbl = neigh_tables[index];
if (!tbl)
goto out;
+ rcu_read_lock_bh();
neigh = __neigh_lookup_noref(tbl, addr, dev);
if (!neigh)
neigh = __neigh_create(tbl, addr, dev, false);
err = PTR_ERR(neigh);
- if (IS_ERR(neigh))
+ if (IS_ERR(neigh)) {
+ rcu_read_unlock_bh();
goto out_kfree_skb;
+ }
err = neigh->output(neigh, skb);
+ rcu_read_unlock_bh();
}
else if (index == NEIGH_LINK_TABLE) {
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f2b77e549c03..eb12d2161fb2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3016,24 +3016,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
/**
- * skb_push_rcsum - push skb and update receive checksum
- * @skb: buffer to update
- * @len: length of data pulled
- *
- * This function performs an skb_push on the packet and updates
- * the CHECKSUM_COMPLETE checksum. It should be used on
- * receive path processing instead of skb_push unless you know
- * that the checksum difference is zero (e.g., a valid IP header)
- * or you are setting ip_summed to CHECKSUM_NONE.
- */
-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
-{
- skb_push(skb, len);
- skb_postpush_rcsum(skb, skb->data, len);
- return skb->data;
-}
-
-/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
* @len: length of data pulled
diff --git a/net/core/sock.c b/net/core/sock.c
index 08bf97eceeb3..25dab8b60223 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -452,11 +452,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested, unsigned int trim_cap)
{
int rc = NET_RX_SUCCESS;
- if (sk_filter(sk, skb))
+ if (sk_filter_trim_cap(sk, skb, trim_cap))
goto discard_and_relse;
skb->dev = NULL;
@@ -492,7 +493,7 @@ discard_and_relse:
kfree_skb(skb);
goto out;
}
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
@@ -1938,6 +1939,10 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
sockc->tsflags |= tsflags;
break;
+ /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
+ case SCM_RIGHTS:
+ case SCM_CREDENTIALS:
+ break;
default:
return -EINVAL;
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5c7e413a3ae4..345a3aeb8c7e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
- __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
@@ -527,17 +527,19 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
rxiph->daddr);
skb_dst_set(skb, dst_clone(dst));
+ local_bh_disable();
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
rxiph->daddr, rxiph->saddr, NULL);
bh_unlock_sock(ctl_sk);
if (net_xmit_eval(err) == 0) {
- DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
- DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+ __DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+ __DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
}
+ local_bh_enable();
out:
- dst_release(dst);
+ dst_release(dst);
}
static void dccp_v4_reqsk_destructor(struct request_sock *req)
@@ -866,7 +868,7 @@ lookup:
goto discard_and_relse;
nf_reset(skb);
- return sk_receive_skb(sk, skb, 1);
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index d176f4e66369..3ff137d9471d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -732,7 +732,7 @@ lookup:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
- return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index df4803437888..a796fc7cbc35 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -41,6 +41,7 @@
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
+#include <net/nexthop.h>
#define RT_MIN_TABLE 1
@@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr)
struct rtnexthop *nhp = nla_data(attr);
int nhs = 0, nhlen = nla_len(attr);
- while(nhlen >= (int)sizeof(struct rtnexthop)) {
- if ((nhlen -= nhp->rtnh_len) < 0)
- return 0;
+ while (rtnh_ok(nhp, nhlen)) {
nhs++;
- nhp = RTNH_NEXT(nhp);
+ nhp = rtnh_next(nhp, &nhlen);
}
- return nhs;
+ /* leftover implies invalid nexthop configuration, discard it */
+ return nhlen > 0 ? 0 : nhs;
}
static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
@@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
int nhlen = nla_len(attr);
change_nexthops(fi) {
- int attrlen = nhlen - sizeof(struct rtnexthop);
- if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
+ int attrlen;
+
+ if (!rtnh_ok(nhp, nhlen))
return -EINVAL;
nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
nh->nh_oif = nhp->rtnh_ifindex;
nh->nh_weight = nhp->rtnh_hops + 1;
- if (attrlen) {
+ attrlen = rtnh_attrlen(nhp);
+ if (attrlen > 0) {
struct nlattr *gw_attr;
gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
}
- nhp = RTNH_NEXT(nhp);
+
+ nhp = rtnh_next(nhp, &nhlen);
} endfor_nexthops(fi);
return 0;
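
The decnet change retires an open-coded walk in favour of the shared rtnh_ok()/rtnh_next() helpers, which bounds-check each rtnexthop record against the remaining attribute length before stepping. The same defensive-iteration shape over a generic length-prefixed record:

#include <stdbool.h>
#include <stddef.h>

struct rec {
	unsigned short len;	/* total record length, header included */
	/* payload follows */
};

/* rtnh_ok() analogue: the record header must fit in what is left,
 * and the record's own length must be sane and in bounds.
 */
static bool rec_ok(const struct rec *r, int remaining)
{
	return remaining >= (int)sizeof(*r) &&
	       r->len >= sizeof(*r) &&
	       (int)r->len <= remaining;
}

/* rtnh_next() analogue: consume the record and advance. */
static const struct rec *rec_next(const struct rec *r, int *remaining)
{
	*remaining -= r->len;
	return (const struct rec *)((const char *)r + r->len);
}
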
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 477937465a20..d95631d09248 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,11 @@ struct esp_skb_cb {
void *tmp;
};
+struct esp_output_extra {
+ __be32 seqhi;
+ u32 esphoff;
+};
+
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
@@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
*
* TODO: Use spare space in skb for this where possible.
*/
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
unsigned int len;
- len = seqhilen;
+ len = extralen;
len += crypto_aead_ivsize(aead);
@@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
return kmalloc(len, GFP_ATOMIC);
}
-static inline __be32 *esp_tmp_seqhi(void *tmp)
+static inline void *esp_tmp_extra(void *tmp)
{
- return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+ return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
return crypto_aead_ivsize(aead) ?
- PTR_ALIGN((u8 *)tmp + seqhilen,
- crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
+ PTR_ALIGN((u8 *)tmp + extralen,
+ crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
@@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
struct ip_esp_hdr *esph = (void *)(skb->data + offset);
void *tmp = ESP_SKB_CB(skb)->tmp;
- __be32 *seqhi = esp_tmp_seqhi(tmp);
+ __be32 *seqhi = esp_tmp_extra(tmp);
esph->seq_no = esph->spi;
esph->spi = *seqhi;
@@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
static void esp_output_restore_header(struct sk_buff *skb)
{
- esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
+ void *tmp = ESP_SKB_CB(skb)->tmp;
+ struct esp_output_extra *extra = esp_tmp_extra(tmp);
+
+ esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
+ sizeof(__be32));
}
static void esp_output_done_esn(struct crypto_async_request *base, int err)
@@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
+ struct esp_output_extra *extra;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_request *req;
@@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int tfclen;
int nfrags;
int assoclen;
- int seqhilen;
- __be32 *seqhi;
+ int extralen;
__be64 seqno;
/* skb is pure payload to encrypt */
@@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
nfrags = err;
assoclen = sizeof(*esph);
- seqhilen = 0;
+ extralen = 0;
if (x->props.flags & XFRM_STATE_ESN) {
- seqhilen += sizeof(__be32);
- assoclen += seqhilen;
+ extralen += sizeof(*extra);
+ assoclen += sizeof(__be32);
}
- tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+ tmp = esp_alloc_tmp(aead, nfrags, extralen);
if (!tmp) {
err = -ENOMEM;
goto error;
}
- seqhi = esp_tmp_seqhi(tmp);
- iv = esp_tmp_iv(aead, tmp, seqhilen);
+ extra = esp_tmp_extra(tmp);
+ iv = esp_tmp_iv(aead, tmp, extralen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
@@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
* encryption.
*/
if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
- *seqhi = esph->spi;
+ extra->esphoff = (unsigned char *)esph -
+ skb_transport_header(skb);
+ esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+ extra->seqhi = esph->spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
aead_request_set_callback(req, 0, esp_output_done_esn, skb);
}
@@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
- seqhi = esp_tmp_seqhi(tmp);
+ seqhi = esp_tmp_extra(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
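
The esp4 hunks widen the scratch space carved out of esp_alloc_tmp()'s single allocation from a lone seqhi word to struct esp_output_extra (seqhi plus the ESP header offset); the esp_tmp_*() helpers merely compute aligned offsets into that one buffer. The sub-allocation idiom, reduced to userspace (the layout here is illustrative):

#include <stddef.h>
#include <stdint.h>

/* One allocation, several aligned regions: [extra][iv][request...].
 * Align-up helper equivalent to the kernel's PTR_ALIGN().
 */
static void *ptr_align(void *p, size_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1));
}

struct output_extra {
	uint32_t seqhi;
	uint32_t esphoff;
};

static struct output_extra *tmp_extra(void *tmp)
{
	return ptr_align(tmp, _Alignof(struct output_extra));
}

static uint8_t *tmp_iv(void *tmp, size_t extralen, size_t iv_align)
{
	return ptr_align((uint8_t *)tmp + extralen, iv_align);
}
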
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d09173bf9500..539fa264e67d 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
+ if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ return -EINVAL;
+
nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
goto err_inval;
+ if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ goto err_inval;
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4c39f4fd332a..de1d119a4497 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -62,26 +62,26 @@ EXPORT_SYMBOL_GPL(gre_del_protocol);
/* Fills in tpi and returns header length to be pulled. */
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- bool *csum_err, __be16 proto)
+ bool *csum_err, __be16 proto, int nhs)
{
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
- if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+ if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ greh = (struct gre_base_hdr *)(skb->data + nhs);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
- if (!pskb_may_pull(skb, hdr_len))
+ if (!pskb_may_pull(skb, nhs + hdr_len))
return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ greh = (struct gre_base_hdr *)(skb->data + nhs);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 4d2025f7ec57..1d000af7f561 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -49,12 +49,6 @@
#include <net/gre.h>
#include <net/dst_metadata.h>
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
-#endif
-
/*
Problems & solutions
--------------------
@@ -217,12 +211,14 @@ static void gre_err(struct sk_buff *skb, u32 info)
* by themselves???
*/
+ const struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
+ if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
+ iph->ihl * 4) < 0) {
if (!csum_err) /* ignore csum errors. */
return;
}
@@ -338,7 +334,7 @@ static int gre_rcv(struct sk_buff *skb)
}
#endif
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+ hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
if (hdr_len < 0)
goto drop;
@@ -1121,6 +1117,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
struct ip_tunnel *t;
int err;
@@ -1136,8 +1133,10 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
t->collect_md = true;
err = ipgre_newlink(net, dev, tb, NULL);
- if (err < 0)
- goto out;
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
@@ -1146,9 +1145,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
if (err)
goto out;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto out;
+
return dev;
out:
- free_netdev(dev);
+ ip_tunnel_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 124bf0a66328..4bd4921639c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
return dst_output(net, sk, skb);
}
#endif
- mtu = ip_skb_dst_mtu(skb);
+ mtu = ip_skb_dst_mtu(sk, skb);
if (skb_is_gso(skb))
return ip_finish_output_gso(net, sk, skb, mtu);
@@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
iph = ip_hdr(skb);
- mtu = ip_skb_dst_mtu(skb);
+ mtu = ip_skb_dst_mtu(sk, skb);
if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
mtu = IPCB(skb)->frag_max_size;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2ed9dd2b5f2f..1d71c40eaaf3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -127,7 +127,9 @@ __be32 ic_myaddr = NONE; /* My IP address */
static __be32 ic_netmask = NONE; /* Netmask for local subnet */
__be32 ic_gateway = NONE; /* Gateway IP address */
-__be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE; /* IP address of the IP addresses' server */
+#endif
__be32 ic_servaddr = NONE; /* Boot server IP address */
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 21a38e296fe2..5ad48ec77710 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -891,8 +891,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c)
+ if (c) {
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXVIFS;
+ }
return c;
}
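Editor's note: seeding last_assert to jiffies - MFC_ASSERT_THRESH - 1 (here and in the matching ip6mr hunk below) ensures the very first assert is not suppressed by the rate limit, which a zero-initialized timestamp could do. A self-contained sketch of the idiom, using a copy of the kernel's wrap-safe time_after() and a made-up threshold:

    #include <stdio.h>

    /* The kernel's wrap-safe "a is after b" idiom. */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    #define ASSERT_THRESH 300UL     /* made-up threshold, in ticks */

    int main(void)
    {
        unsigned long now = 7;      /* pretend we are just past boot */
        unsigned long last_zeroed = 0;
        unsigned long last_seeded = now - ASSERT_THRESH - 1;  /* wraps: fine */

        /* Zero-initialized: the first event looks rate-limited. */
        printf("zeroed: fire=%d\n", time_after(now, last_zeroed + ASSERT_THRESH));
        /* Seeded in the past: the first event fires immediately. */
        printf("seeded: fire=%d\n", time_after(now, last_seeded + ASSERT_THRESH));
        return 0;
    }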
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6c8f4cd0800..42bf89aaf6a5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -87,7 +87,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
@@ -3421,6 +3421,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
return flag;
}
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+ u32 *last_oow_ack_time)
+{
+ if (*last_oow_ack_time) {
+ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+ NET_INC_STATS(net, mib_idx);
+ return true; /* rate-limited: don't send yet! */
+ }
+ }
+
+ *last_oow_ack_time = tcp_time_stamp;
+
+ return false; /* not rate-limited: go ahead, send dupack now! */
+}
+
/* Return true if we're currently rate-limiting out-of-window ACKs and
* thus shouldn't send a dupack right now. We rate-limit dupacks in
* response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
@@ -3434,21 +3451,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
/* Data packets without SYNs are not likely part of an ACK loop. */
if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
!tcp_hdr(skb)->syn)
- goto not_rate_limited;
-
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
- NET_INC_STATS(net, mib_idx);
- return true; /* rate-limited: don't send yet! */
- }
- }
-
- *last_oow_ack_time = tcp_time_stamp;
+ return false;
-not_rate_limited:
- return false; /* not rate-limited: go ahead, send dupack now! */
+ return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
}
/* RFC 5961 7 [ACK Throttling] */
@@ -3458,21 +3463,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
- u32 now;
+ u32 count, now;
/* First check our per-socket dupack rate limit. */
- if (tcp_oow_rate_limited(sock_net(sk), skb,
- LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
- &tp->last_oow_ack_time))
+ if (__tcp_oow_rate_limited(sock_net(sk),
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+ &tp->last_oow_ack_time))
return;
- /* Then check the check host-wide RFC 5961 rate limit. */
+ /* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ WRITE_ONCE(challenge_count, half +
+ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = READ_ONCE(challenge_count);
+ if (count > 0) {
+ WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
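Editor's note: the challenge-ACK change replaces a predictable "count up to the limit each second" counter with a per-second budget drawn at random from [limit/2, limit/2 + limit), decremented under READ_ONCE/WRITE_ONCE, so an off-path attacker can no longer infer the exact global counter value. A userspace sketch of the budget logic, with rand() standing in for prandom_u32_max() and no SMP annotations:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static unsigned int limit = 1000;   /* sysctl_tcp_challenge_ack_limit */
    static unsigned long stamp;         /* second of the last refill */
    static unsigned int budget;         /* challenge ACKs left this second */

    /* Return 1 if a challenge ACK may be sent now. */
    static int challenge_ack_allowed(unsigned long now_sec)
    {
        if (now_sec != stamp) {
            unsigned int half = (limit + 1) / 2;

            stamp = now_sec;
            /* Budget in [half, half + limit): unpredictable, so the
             * counter cannot be probed by driving it to the limit. */
            budget = half + (unsigned int)(rand() % limit);
        }
        if (budget == 0)
            return 0;
        budget--;
        return 1;
    }

    int main(void)
    {
        unsigned long sec = (unsigned long)time(NULL);
        int i, sent = 0;

        srand((unsigned int)sec);
        for (i = 0; i < 5000; i++)
            sent += challenge_ack_allowed(sec);
        printf("allowed this second: %d\n", sent);
        return 0;
    }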
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8bd9911fdd16..e00e972c4e6a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2751,7 +2751,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
struct sk_buff *hole = NULL;
- u32 last_lost;
+ u32 max_segs, last_lost;
int mib_idx;
int fwd_rexmitting = 0;
@@ -2771,6 +2771,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
last_lost = tp->snd_una;
}
+ max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
tcp_for_write_queue_from(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
int segs;
@@ -2784,6 +2785,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
if (segs <= 0)
return;
+ /* In case tcp_shift_skb_data() has aggregated large skbs,
+ * we need to make sure we are not sending too-big TSO packets
+ */
+ segs = min_t(int, segs, max_segs);
if (fwd_rexmitting) {
begin_fwd:
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0ff31d97d485..4aed8fc23d32 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -391,9 +391,9 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
-static inline int compute_score(struct sock *sk, struct net *net,
- __be32 saddr, unsigned short hnum, __be16 sport,
- __be32 daddr, __be16 dport, int dif)
+static int compute_score(struct sock *sk, struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned short hnum, int dif)
{
int score;
struct inet_sock *inet;
@@ -434,52 +434,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
return score;
}
-/*
- * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
- */
-static inline int compute_score2(struct sock *sk, struct net *net,
- __be32 saddr, __be16 sport,
- __be32 daddr, unsigned int hnum, int dif)
-{
- int score;
- struct inet_sock *inet;
-
- if (!net_eq(sock_net(sk), net) ||
- ipv6_only_sock(sk))
- return -1;
-
- inet = inet_sk(sk);
-
- if (inet->inet_rcv_saddr != daddr ||
- inet->inet_num != hnum)
- return -1;
-
- score = (sk->sk_family == PF_INET) ? 2 : 1;
-
- if (inet->inet_daddr) {
- if (inet->inet_daddr != saddr)
- return -1;
- score += 4;
- }
-
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score += 4;
- }
-
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score += 4;
- }
-
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
- score++;
-
- return score;
-}
-
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
@@ -492,11 +446,11 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
udp_ehash_secret + net_hash_mix(net));
}
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2, unsigned int slot2,
+ struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
@@ -506,7 +460,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score2(sk, net, saddr, sport,
+ score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
@@ -554,17 +508,22 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, slot2, skb);
+ hslot2, skb);
if (!result) {
+ unsigned int old_slot2 = slot2;
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
+ /* avoid searching the same slot again. */
+ if (unlikely(slot2 == old_slot2))
+ return result;
+
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
- htonl(INADDR_ANY), hnum, dif,
- hslot2, slot2, skb);
+ daddr, hnum, dif,
+ hslot2, skb);
}
return result;
}
@@ -572,8 +531,8 @@ begin:
result = NULL;
badness = 0;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, saddr, hnum, sport,
- daddr, dport, dif);
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -1624,6 +1583,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (sk_filter(sk, skb))
goto drop;
+ if (unlikely(skb->len < sizeof(struct udphdr)))
+ goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
@@ -1755,8 +1716,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
return err;
}
- return skb_checksum_init_zero_check(skb, proto, uh->check,
- inet_compute_pseudo);
+ /* Note, we are only interested in != 0 or == 0, thus the
+ * force to int.
+ */
+ return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+ inet_compute_pseudo);
}
/*
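Editor's note: the udp.c changes fold compute_score2() into a single compute_score() and tighten the two-pass hash2 lookup: first the exact (daddr, port) slot is walked, then the wildcard slot, with an early return when both keys hash to the same bucket so it is not walked twice. A toy sketch of that control flow (hash function and table mask are made up):

    #include <stdio.h>

    #define TABLE_MASK 0xFu      /* made-up 16-slot table */

    static unsigned int hash2(unsigned int addr, unsigned short port)
    {
        return (addr * 2654435761u + port) & TABLE_MASK;   /* toy hash */
    }

    /* Walk the exact (daddr, port) slot, then the wildcard slot --
     * unless both hash to the same bucket, which was just searched. */
    static void lookup(unsigned int daddr, unsigned short port)
    {
        unsigned int slot = hash2(daddr, port);
        unsigned int wild = hash2(0u /* INADDR_ANY */, port);

        printf("walk exact slot %u\n", slot);
        if (wild == slot) {
            printf("wildcard hashes to the same slot, done\n");
            return;
        }
        printf("walk wildcard slot %u\n", wild);
    }

    int main(void)
    {
        lookup(0x7f000001u, 53);
        return 0;
    }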
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4527285fcaa2..a4fa84076969 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
- ping_err(skb, offset, info);
+ ping_err(skb, offset, ntohl(info));
}
static int icmpv6_rcv(struct sk_buff *skb);
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index b2025bf3da4a..c0cbcb259f5a 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -78,9 +78,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
* we accept a checksum of zero here. When we find the socket
* for the UDP packet we'll check if that socket allows zero checksum
* for IPv6 (set by socket option).
+ *
+ * Note, we are only interested in != 0 or == 0, thus the
+ * force to int.
*/
- return skb_checksum_init_zero_check(skb, proto, uh->check,
- ip6_compute_pseudo);
+ return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+ ip6_compute_pseudo);
}
EXPORT_SYMBOL(udp6_csum_init);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1bcef2369d64..771be1fa4176 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
}
}
+ free_percpu(non_pcpu_rt->rt6i_pcpu);
non_pcpu_rt->rt6i_pcpu = NULL;
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index fdc9de276ab1..776d145113e1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -468,7 +468,7 @@ static int gre_rcv(struct sk_buff *skb)
bool csum_err = false;
int hdr_len;
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+ hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
if (hdr_len < 0)
goto drop;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f2e2013f8346..487ef3bc7bbc 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (!c)
return NULL;
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXMIFS;
return c;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 969913da494f..520b7884d0c2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1782,7 +1782,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = 0;
+ int flags = RT6_LOOKUP_F_IFACE;
table = fib6_get_table(net, cfg->fc_table);
if (!table)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 0a5a255277e5..0619ac70836d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -560,13 +560,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, IPPROTO_IPV6, 0);
+ t->parms.link, 0, iph->protocol, 0);
err = 0;
goto out;
}
if (type == ICMP_REDIRECT) {
ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
- IPPROTO_IPV6, 0);
+ iph->protocol, 0);
err = 0;
goto out;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f36c2d076fce..2255d2bf5f6b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -738,7 +738,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr,
int oif, struct tcp_md5sig_key *key, int rst,
- u8 tclass, u32 label)
+ u8 tclass, __be32 label)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
@@ -911,7 +911,7 @@ out:
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key, u8 tclass,
- u32 label)
+ __be32 label)
{
tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
tclass, label);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f421c9f23c5b..acc09705618b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -115,11 +115,10 @@ static void udp_v6_rehash(struct sock *sk)
udp_lib_rehash(sk, new_hash);
}
-static inline int compute_score(struct sock *sk, struct net *net,
- unsigned short hnum,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif)
+static int compute_score(struct sock *sk, struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, unsigned short hnum,
+ int dif)
{
int score;
struct inet_sock *inet;
@@ -162,54 +161,11 @@ static inline int compute_score(struct sock *sk, struct net *net,
return score;
}
-static inline int compute_score2(struct sock *sk, struct net *net,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr,
- unsigned short hnum, int dif)
-{
- int score;
- struct inet_sock *inet;
-
- if (!net_eq(sock_net(sk), net) ||
- udp_sk(sk)->udp_port_hash != hnum ||
- sk->sk_family != PF_INET6)
- return -1;
-
- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
- return -1;
-
- score = 0;
- inet = inet_sk(sk);
-
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score++;
- }
-
- if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
- return -1;
- score++;
- }
-
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score++;
- }
-
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
- score++;
-
- return score;
-}
-
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2, unsigned int slot2,
+ struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
@@ -219,7 +175,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score2(sk, net, saddr, sport,
+ score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
@@ -268,17 +224,22 @@ struct sock *__udp6_lib_lookup(struct net *net,
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, slot2, skb);
+ hslot2, skb);
if (!result) {
+ unsigned int old_slot2 = slot2;
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
+ /* avoid searching the same slot again. */
+ if (unlikely(slot2 == old_slot2))
+ return result;
+
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
- &in6addr_any, hnum, dif,
- hslot2, slot2, skb);
+ daddr, hnum, dif,
+ hslot2, skb);
}
return result;
}
@@ -286,7 +247,7 @@ begin:
result = NULL;
badness = -1;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
+ score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -659,6 +620,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (sk_filter(sk, skb))
goto drop;
+ if (unlikely(skb->len < sizeof(struct udphdr)))
+ goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index d4fdf8f7b471..8f5678cb6263 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -246,9 +246,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
{
struct serial_struct info;
- if (!retinfo)
- return -EFAULT;
-
memset(&info, 0, sizeof(info));
info.line = self->line;
info.flags = self->port.flags;
@@ -258,11 +255,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
/* For compatibility */
info.type = PORT_16550A;
- info.port = 0;
- info.irq = 0;
- info.xmit_fifo_size = 0;
- info.hub6 = 0;
- info.custom_divisor = 0;
if (copy_to_user(retinfo, &info, sizeof(*retinfo)))
return -EFAULT;
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index 738008726cc6..fda7f4715c58 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -241,6 +241,7 @@ static const struct file_operations kcm_seq_fops = {
.open = kcm_seq_open,
.read = seq_read,
.llseek = seq_lseek,
+ .release = seq_release_net,
};
static struct kcm_seq_muxinfo kcm_seq_muxinfo = {
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 21b1fdf5d01d..6a1603bcdced 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -148,14 +148,17 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
void mesh_sta_cleanup(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 changed;
+ u32 changed = 0;
/*
* maybe userspace handles peer allocation and peering, but in either
* case the beacon is still generated by the kernel and we might need
* an update.
*/
- changed = mesh_accept_plinks_update(sdata);
+ if (sdata->u.mesh.user_mpm &&
+ sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ changed |= mesh_plink_dec_estab_count(sdata);
+ changed |= mesh_accept_plinks_update(sdata);
if (!sdata->u.mesh.user_mpm) {
changed |= mesh_plink_deactivate(sta);
del_timer_sync(&sta->mesh->plink_timer);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 803001a45aa1..1b07578bedf3 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1545,7 +1545,8 @@ error:
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ int ifindex)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1566,6 +1567,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
set_sock_size(sock->sk, 0, result);
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+ sock->sk->sk_bound_dev_if = ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
@@ -1868,7 +1870,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
if (state == IP_VS_STATE_MASTER)
sock = make_send_sock(ipvs, id);
else
- sock = make_receive_sock(ipvs, id);
+ sock = make_receive_sock(ipvs, id, dev->ifindex);
if (IS_ERR(sock)) {
result = PTR_ERR(sock);
goto outtinfo;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b8c5501d3872..4cbda4bd8926 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -667,6 +667,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->allow_clash &&
+ !nfct_nat(ct) &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
@@ -1565,6 +1566,8 @@ void nf_conntrack_cleanup_end(void)
nf_conntrack_tstamp_fini();
nf_conntrack_acct_fini();
nf_conntrack_expect_fini();
+
+ kmem_cache_destroy(nf_conntrack_cachep);
}
/*
@@ -1620,8 +1623,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
unsigned int nr_slots, i;
size_t sz;
+ if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+ if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
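Editor's note: both new checks in nf_ct_alloc_hashtable() guard the same hazard -- a requested bucket count (before or after rounding up) whose byte size overflows, silently turning the allocation into something far smaller than intended. The shape of the check in plain C, using SIZE_MAX rather than the kernel's UINT_MAX:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Reject any bucket count whose byte size would wrap before it
     * ever reaches the allocator. */
    static void *alloc_buckets(size_t nr, size_t bucket_size)
    {
        if (bucket_size == 0 || nr > SIZE_MAX / bucket_size)
            return NULL;                    /* nr * bucket_size overflows */
        return calloc(nr, bucket_size);
    }

    int main(void)
    {
        void *ok  = alloc_buckets(1024, sizeof(void *));
        void *bad = alloc_buckets(SIZE_MAX / 2, 16);

        printf("ok=%s bad=%s\n", ok ? "alloc" : "NULL", bad ? "alloc" : "NULL");
        free(ok);
        return 0;
    }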
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 7b7aa871a174..cf7c74599cbe 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1724,9 +1724,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
err = nf_tables_newexpr(ctx, &info, expr);
if (err < 0)
- goto err2;
+ goto err3;
return expr;
+err3:
+ kfree(expr);
err2:
module_put(info.ops->type->owner);
err1:
@@ -2946,24 +2948,20 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
* jumps are already validated for that chain.
*/
list_for_each_entry(i, &set->bindings, list) {
- if (binding->flags & NFT_SET_MAP &&
+ if (i->flags & NFT_SET_MAP &&
i->chain == binding->chain)
goto bind;
}
+ iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_bind_check_setelem;
set->ops->walk(ctx, set, &iter);
- if (iter.err < 0) {
- /* Destroy anonymous sets if binding fails */
- if (set->flags & NFT_SET_ANONYMOUS)
- nf_tables_set_destroy(ctx, set);
-
+ if (iter.err < 0)
return iter.err;
- }
}
bind:
binding->chain = ctx->chain;
@@ -3192,12 +3190,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
if (nest == NULL)
goto nla_put_failure;
- args.cb = cb;
- args.skb = skb;
- args.iter.skip = cb->args[0];
- args.iter.count = 0;
- args.iter.err = 0;
- args.iter.fn = nf_tables_dump_setelem;
+ args.cb = cb;
+ args.skb = skb;
+ args.iter.genmask = nft_genmask_cur(ctx.net);
+ args.iter.skip = cb->args[0];
+ args.iter.count = 0;
+ args.iter.err = 0;
+ args.iter.fn = nf_tables_dump_setelem;
set->ops->walk(&ctx, set, &args.iter);
nla_nest_end(skb, nest);
@@ -4284,6 +4283,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
binding->chain != chain)
continue;
+ iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
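Editor's note: the common thread in these nf_tables hunks is that set walks now carry an explicit generation mask: dump paths look at the current generation (nft_genmask_cur()), while bind and loop checks validate against the next one (nft_genmask_next()). A deliberately simplified model of two-generation element visibility -- two bits, one per generation, where a set bit means inactive; this is the idea, not the nft layout:

    #include <stdio.h>

    #define GEN0 0x1u
    #define GEN1 0x2u

    static unsigned int cur_gen;    /* 0 or 1 */

    static unsigned int genmask_cur(void)  { return cur_gen ? GEN1 : GEN0; }
    static unsigned int genmask_next(void) { return cur_gen ? GEN0 : GEN1; }

    static int elem_active(unsigned int elem_mask, unsigned int genmask)
    {
        return !(elem_mask & genmask);
    }

    int main(void)
    {
        /* A newly added element: inactive now, active after commit. */
        unsigned int newly_added = genmask_cur();

        printf("dump (cur gen) sees it:  %d\n",
               elem_active(newly_added, genmask_cur()));
        printf("bind (next gen) sees it: %d\n",
               elem_active(newly_added, genmask_next()));
        return 0;
    }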
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index e9f8dffcc244..fb8b5892b5ff 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -143,7 +143,7 @@ next_rule:
list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
/* This rule is not active, skip. */
- if (unlikely(rule->genmask & (1 << gencursor)))
+ if (unlikely(rule->genmask & gencursor))
continue;
rulenum++;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 137e308d5b24..81fbb450783e 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -54,7 +54,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
const struct nf_conn_help *help;
const struct nf_conntrack_tuple *tuple;
const struct nf_conntrack_helper *helper;
- long diff;
unsigned int state;
ct = nf_ct_get(pkt->skb, &ctinfo);
@@ -94,10 +93,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
return;
#endif
case NFT_CT_EXPIRATION:
- diff = (long)jiffies - (long)ct->timeout.expires;
- if (diff < 0)
- diff = 0;
- *dest = jiffies_to_msecs(diff);
+ *dest = jiffies_to_msecs(nf_ct_expires(ct));
return;
case NFT_CT_HELPER:
if (ct->master == NULL)
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 6fa016564f90..f39c53a159eb 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -189,7 +189,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
struct nft_hash_elem *he;
struct rhashtable_iter hti;
struct nft_set_elem elem;
- u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
int err;
err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
@@ -218,7 +217,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
goto cont;
if (nft_set_elem_expired(&he->ext))
goto cont;
- if (!nft_set_elem_active(&he->ext, genmask))
+ if (!nft_set_elem_active(&he->ext, iter->genmask))
goto cont;
elem.priv = he;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 16c50b0dd426..f4bad9dc15c4 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -227,7 +227,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
skb->pkt_type = value;
break;
case NFT_META_NFTRACE:
- skb->nf_trace = 1;
+ skb->nf_trace = !!value;
break;
default:
WARN_ON(1);
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index f762094af7c1..7201d57b5a93 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -211,7 +211,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
struct nft_rbtree_elem *rbe;
struct nft_set_elem elem;
struct rb_node *node;
- u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
spin_lock_bh(&nft_rbtree_lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
@@ -219,7 +218,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&rbe->ext, genmask))
+ if (!nft_set_elem_active(&rbe->ext, iter->genmask))
goto cont;
elem.priv = rbe;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 3d5feede962d..d84312584ee4 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -818,8 +818,18 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
*/
state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
__ovs_ct_update_key(key, state, &info->zone, exp->master);
- } else
- return __ovs_ct_lookup(net, key, info, skb);
+ } else {
+ struct nf_conn *ct;
+ int err;
+
+ err = __ovs_ct_lookup(net, key, info, skb);
+ if (err)
+ return err;
+
+ ct = (struct nf_conn *)skb->nfct;
+ if (ct)
+ nf_ct_deliver_cached_events(ct);
+ }
return 0;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9bff6ef16fa7..b43c4015b2f7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
- return reciprocal_scale(skb_get_hash(skb), num);
+ return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
@@ -1927,13 +1927,11 @@ retry:
goto out_unlock;
}
- sockc.tsflags = 0;
+ sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
- if (unlikely(err)) {
- err = -EINVAL;
+ if (unlikely(err))
goto out_unlock;
- }
}
skb->protocol = proto;
@@ -2678,7 +2676,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
}
- sockc.tsflags = 0;
+ sockc.tsflags = po->sk.sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(&po->sk, msg, &sockc);
if (unlikely(err))
@@ -2881,7 +2879,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;
- sockc.tsflags = 0;
+ sockc.tsflags = sk->sk_tsflags;
sockc.mark = sk->sk_mark;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 310cabce2311..7c2a65a6af5c 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -111,7 +111,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
}
}
- if (conn->c_version < RDS_PROTOCOL(3,1)) {
+ if (conn->c_version < RDS_PROTOCOL(3, 1)) {
printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
" no longer supported\n",
&conn->c_faddr,
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 6b12b68541ae..814173b466d9 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -95,8 +95,9 @@ out:
*/
static void rds_loop_inc_free(struct rds_incoming *inc)
{
- struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
- rds_message_put(rm);
+ struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+
+ rds_message_put(rm);
}
/* we need to at least give the thread something to succeed */
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c173f69e1479..e381bbcd9cc1 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -102,7 +102,8 @@ int rds_sysctl_init(void)
rds_sysctl_reconnect_min = msecs_to_jiffies(1);
rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
- rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
+ rds_sysctl_reg_table =
+ register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
if (!rds_sysctl_reg_table)
return -ENOMEM;
return 0;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 74ee126a6fe6..c8a7b4c90190 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -616,7 +616,7 @@ static int rds_tcp_init(void)
ret = rds_tcp_recv_init();
if (ret)
- goto out_slab;
+ goto out_pernet;
ret = rds_trans_register(&rds_tcp_transport);
if (ret)
@@ -628,8 +628,9 @@ static int rds_tcp_init(void)
out_recv:
rds_tcp_recv_exit();
-out_slab:
+out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index ec0602b0dc24..7940babf6c71 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -83,7 +83,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_tcp_xmit_prepare(struct rds_connection *conn);
void rds_tcp_xmit_complete(struct rds_connection *conn);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off);
+ unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
/* tcp_stats.c */
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index fba13d0305fb..f6e95d60db54 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -54,19 +54,19 @@ void rds_tcp_state_change(struct sock *sk)
rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
- switch(sk->sk_state) {
- /* ignore connecting sockets as they make progress */
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- break;
- case TCP_ESTABLISHED:
- rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
- break;
- case TCP_CLOSE_WAIT:
- case TCP_CLOSE:
- rds_conn_drop(conn);
- default:
- break;
+ switch (sk->sk_state) {
+ /* ignore connecting sockets as they make progress */
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ case TCP_ESTABLISHED:
+ rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+ break;
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSE:
+ rds_conn_drop(conn);
+ default:
+ break;
}
out:
read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 686b1d03a558..245542ca4718 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -138,7 +138,7 @@ int rds_tcp_accept_one(struct socket *sock)
rds_tcp_reset_callbacks(new_sock, conn);
conn->c_outgoing = 0;
/* rds_connect_path_complete() marks RDS_CONN_UP */
- rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING);
+ rds_connect_path_complete(conn, RDS_CONN_RESETTING);
}
} else {
rds_tcp_set_callbacks(new_sock, conn);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index c3196f9d070a..6e6a7111a034 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -171,7 +171,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
while (left) {
if (!tinc) {
tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
- arg->gfp);
+ arg->gfp);
if (!tinc) {
desc->error = -ENOMEM;
goto out;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 22d0f2020a79..618be69c9c3b 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -66,19 +66,19 @@ void rds_tcp_xmit_complete(struct rds_connection *conn)
static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
{
struct kvec vec = {
- .iov_base = data,
- .iov_len = len,
+ .iov_base = data,
+ .iov_len = len,
+ };
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
};
- struct msghdr msg = {
- .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
- };
return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}
/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off)
+ unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_tcp_connection *tc = conn->c_transport_data;
int done = 0;
@@ -196,7 +196,7 @@ void rds_tcp_write_space(struct sock *sk)
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
diff --git a/net/rds/transport.c b/net/rds/transport.c
index f3afd1d60d3c..2ffd3e30c643 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -140,8 +140,7 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
rds_info_iter_unmap(iter);
down_read(&rds_trans_sem);
- for (i = 0; i < RDS_TRANS_COUNT; i++)
- {
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
if (!trans || !trans->stats_info_copy)
continue;
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 79c4abcfa6b4..0a6394754e81 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (sock_queue_rcv_skb(sk, skb) == 0) {
+ if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+ __sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
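Editor's note: switching rose to sk_filter_trim_cap() lets a socket filter trim the skb but never below ROSE_MIN_LEN, so the state machine can still read its own header afterwards. The clamp itself is simple; a hypothetical userspace analogue:

    #include <stdio.h>

    /* Analogue of the sk_filter_trim_cap() contract: the filter may
     * shrink the packet, but never below 'cap' bytes. */
    static unsigned int filter_trim(unsigned int pkt_len,
                                    unsigned int filter_keep,
                                    unsigned int cap)
    {
        unsigned int keep = filter_keep < pkt_len ? filter_keep : pkt_len;

        return keep < cap ? cap : keep;
    }

    int main(void)
    {
        /* A filter asking to keep 1 byte still leaves the 3-byte
         * minimum header (ROSE_MIN_LEN in the hunk above). */
        printf("kept %u bytes\n", filter_trim(120, 1, 3));
        return 0;
    }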
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 336774a535c3..c7a0b0d481c0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1118,7 +1118,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
nla_nest_end(skb, nest);
ret = skb->len;
} else
- nla_nest_cancel(skb, nest);
+ nlmsg_trim(skb, b);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
if (NETLINK_CB(cb->skb).portid && ret)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 658046dfe02d..ea4a2fef1b71 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -106,9 +106,9 @@ int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
- mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+ mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
if (!mi->metaval)
return -ENOMEM;
@@ -116,9 +116,9 @@ int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
- mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+ mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
if (!mi->metaval)
return -ENOMEM;
@@ -240,10 +240,10 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
}
/* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
*/
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
- void *val, int len)
+ void *val, int len, bool exists)
{
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
@@ -251,11 +251,13 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
if (!ops) {
ret = -ENOENT;
#ifdef CONFIG_MODULES
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
rtnl_unlock();
request_module("ifemeta%u", metaid);
rtnl_lock();
- spin_lock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ops = find_ife_oplist(metaid);
#endif
}
@@ -272,10 +274,10 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
}
/* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
*/
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
- int len)
+ int len, bool atomic)
{
struct tcf_meta_info *mi = NULL;
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
@@ -284,7 +286,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
if (!ops)
return -ENOENT;
- mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+ mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
if (!mi) {
/* put back what find_ife_oplist took */
module_put(ops->owner);
@@ -294,7 +296,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
mi->metaid = metaid;
mi->ops = ops;
if (len > 0) {
- ret = ops->alloc(mi, metaval);
+ ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
if (ret != 0) {
kfree(mi);
module_put(ops->owner);
@@ -313,11 +315,13 @@ static int use_all_metadata(struct tcf_ife_info *ife)
int rc = 0;
int installed = 0;
+ read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
- rc = add_metainfo(ife, o->metaid, NULL, 0);
+ rc = add_metainfo(ife, o->metaid, NULL, 0, true);
if (rc == 0)
installed += 1;
}
+ read_unlock(&ife_mod_lock);
if (installed)
return 0;
@@ -385,8 +389,9 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
spin_unlock_bh(&ife->tcf_lock);
}
-/* under ife->tcf_lock */
-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+/* under ife->tcf_lock for existing action */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ bool exists)
{
int len = 0;
int rc = 0;
@@ -398,11 +403,11 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len);
+ rc = load_metaops_and_vet(ife, i, val, len, exists);
if (rc != 0)
return rc;
- rc = add_metainfo(ife, i, val, len);
+ rc = add_metainfo(ife, i, val, len, exists);
if (rc)
return rc;
}
@@ -474,7 +479,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
saddr = nla_data(tb[TCA_IFE_SMAC]);
}
- spin_lock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ife->tcf_action = parm->action;
if (parm->flags & IFE_ENCODE) {
@@ -504,11 +510,12 @@ metadata_parse_err:
if (ret == ACT_P_CREATED)
_tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
return err;
}
- err = populate_metalist(ife, tb2);
+ err = populate_metalist(ife, tb2, exists);
if (err)
goto metadata_parse_err;
@@ -523,12 +530,14 @@ metadata_parse_err:
if (ret == ACT_P_CREATED)
_tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
return err;
}
}
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_hash_insert(tn, a);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 9f002ada7074..d4bd19ee5822 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -121,10 +121,13 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
}
td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
- if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
+ if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+ if (exists)
+ tcf_hash_release(a, bind);
return -EINVAL;
+ }
- if (!tcf_hash_check(tn, index, a, bind)) {
+ if (!exists) {
ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
false);
if (ret)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 128942bc9e42..1f5bd6ccbd2c 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
if (!(at & AT_EGRESS)) {
if (m->tcfm_ok_push)
- skb_push(skb2, skb->mac_len);
+ skb_push_rcsum(skb2, skb->mac_len);
}
/* mirror is always swallowed */
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 2177eac0a61e..2e4bd2c0a50c 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -37,14 +37,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
+ unsigned int prev_backlog;
+
if (likely(skb_queue_len(&sch->q) < sch->limit))
return qdisc_enqueue_tail(skb, sch);
+ prev_backlog = sch->qstats.backlog;
/* queue full, remove one skb to fulfill the limit */
__qdisc_queue_drop_head(sch, &sch->q);
qdisc_qstats_drop(sch);
qdisc_enqueue_tail(skb, sch);
+ qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
return NET_XMIT_CN;
}
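Editor's note: pfifo_tail_enqueue() now reports the backlog change as a before/after delta rather than assuming it, since the packet dropped from the head and the one enqueued at the tail generally differ in size. A sketch of the accounting:

    #include <stdio.h>

    struct q { unsigned int backlog; };

    /* Drop one packet from the head, enqueue the new one at the tail,
     * and report the net backlog change as a delta. */
    static unsigned int tail_enqueue(struct q *q, unsigned int head_len,
                                     unsigned int new_len)
    {
        unsigned int prev_backlog = q->backlog;

        q->backlog -= head_len;
        q->backlog += new_len;
        return prev_backlog - q->backlog;   /* pass up the qdisc tree */
    }

    int main(void)
    {
        struct q q = { 3000 };
        unsigned int reduced = tail_enqueue(&q, 1500, 100);

        printf("backlog=%u reduced=%u\n", q.backlog, reduced); /* 1600 1400 */
        return 0;
    }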
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d4b4218af6b1..052f84d6cc23 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1007,7 +1007,9 @@ static void htb_work_func(struct work_struct *work)
struct htb_sched *q = container_of(work, struct htb_sched, work);
struct Qdisc *sch = q->watchdog.qdisc;
+ rcu_read_lock();
__netif_schedule(qdisc_root(sch));
+ rcu_read_unlock();
}
static int htb_init(struct Qdisc *sch, struct nlattr *opt)
@@ -1138,8 +1140,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (!cl->level && cl->un.leaf.q)
qlen = cl->un.leaf.q->q.qlen;
- cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
- cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
+ cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
+ INT_MIN, INT_MAX);
+ cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
+ INT_MIN, INT_MAX);
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 205bed00dd34..178f1630a036 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -650,14 +650,14 @@ deliver:
#endif
if (q->qdisc) {
+ unsigned int pkt_len = qdisc_pkt_len(skb);
int err = qdisc_enqueue(skb, q->qdisc);
- if (unlikely(err != NET_XMIT_SUCCESS)) {
- if (net_xmit_drop_count(err)) {
- qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1,
- qdisc_pkt_len(skb));
- }
+ if (err != NET_XMIT_SUCCESS &&
+ net_xmit_drop_count(err)) {
+ qdisc_qstats_drop(sch);
+ qdisc_tree_reduce_backlog(sch, 1,
+ pkt_len);
}
goto tfifo_dequeue;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4b0a82191bc4..a356450b747b 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -172,8 +172,9 @@ prio_destroy(struct Qdisc *sch)
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
struct prio_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *queues[TCQ_PRIO_BANDS];
+ int oldbands = q->bands, i;
struct tc_prio_qopt *qopt;
- int i;
if (nla_len(opt) < sizeof(*qopt))
return -EINVAL;
@@ -187,62 +188,42 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
return -EINVAL;
}
+ /* Before commit, make sure we can allocate all new qdiscs */
+ for (i = oldbands; i < qopt->bands; i++) {
+ queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, i + 1));
+ if (!queues[i]) {
+ while (i > oldbands)
+ qdisc_destroy(queues[--i]);
+ return -ENOMEM;
+ }
+ }
+
sch_tree_lock(sch);
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
+ for (i = q->bands; i < oldbands; i++) {
struct Qdisc *child = q->queues[i];
- q->queues[i] = &noop_qdisc;
- if (child != &noop_qdisc) {
- qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
- qdisc_destroy(child);
- }
- }
- sch_tree_unlock(sch);
- for (i = 0; i < q->bands; i++) {
- if (q->queues[i] == &noop_qdisc) {
- struct Qdisc *child, *old;
-
- child = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops,
- TC_H_MAKE(sch->handle, i + 1));
- if (child) {
- sch_tree_lock(sch);
- old = q->queues[i];
- q->queues[i] = child;
-
- if (old != &noop_qdisc) {
- qdisc_tree_reduce_backlog(old,
- old->q.qlen,
- old->qstats.backlog);
- qdisc_destroy(old);
- }
- sch_tree_unlock(sch);
- }
- }
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
+ qdisc_destroy(child);
}
+
+ for (i = oldbands; i < q->bands; i++)
+ q->queues[i] = queues[i];
+
+ sch_tree_unlock(sch);
return 0;
}
static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
- struct prio_sched_data *q = qdisc_priv(sch);
- int i;
-
- for (i = 0; i < TCQ_PRIO_BANDS; i++)
- q->queues[i] = &noop_qdisc;
-
- if (opt == NULL) {
+ if (!opt)
return -EINVAL;
- } else {
- int err;
- if ((err = prio_tune(sch, opt)) != 0)
- return err;
- }
- return 0;
+ return prio_tune(sch, opt);
}
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
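Editor's note: the prio_tune() rewrite is a classic allocate-then-commit. Every new band qdisc is created before sch_tree_lock() is taken, so the commit phase under the lock cannot fail and the old configuration survives any -ENOMEM intact. The skeleton of that pattern, with malloc() standing in for qdisc_create_dflt():

    #include <stdio.h>
    #include <stdlib.h>

    #define BANDS_MAX 16

    /* Allocate every new element first; only a failure-free commit
     * happens inside the critical section. */
    static int grow_bands(void *bands[], int oldn, int newn)
    {
        void *tmp[BANDS_MAX];
        int i;

        for (i = oldn; i < newn; i++) {
            tmp[i] = malloc(64);        /* stand-in for qdisc_create_dflt() */
            if (!tmp[i]) {
                while (i > oldn)
                    free(tmp[--i]);     /* nothing was published yet */
                return -1;
            }
        }
        /* --- sch_tree_lock() would be taken here --- */
        for (i = oldn; i < newn; i++)
            bands[i] = tmp[i];
        /* --- and released here --- */
        return 0;
    }

    int main(void)
    {
        void *bands[BANDS_MAX] = { 0 };
        int i, err = grow_bands(bands, 3, 8);

        printf("grow: %d\n", err);
        for (i = 0; i < BANDS_MAX; i++)
            free(bands[i]);
        return 0;
    }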
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a701527a9480..47cf4604d19c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
- struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
@@ -127,8 +126,6 @@ int sctp_rcv(struct sk_buff *skb)
if (skb_linearize(skb))
goto discard_it;
- sh = sctp_hdr(skb);
-
/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
@@ -230,7 +227,7 @@ int sctp_rcv(struct sk_buff *skb)
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
- chunk->sctp_hdr = sh;
+ chunk->sctp_hdr = sctp_hdr(skb);
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 1ce724b87618..f69edcf219e5 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -3,12 +3,6 @@
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>
-extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
- struct sock *sk);
-extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
- struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns);
-
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
void *info);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 06b4df9faaa1..2808d550d273 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -446,16 +446,27 @@ out_no_rpciod:
return ERR_PTR(err);
}
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
struct rpc_xprt *xprt)
{
struct rpc_clnt *clnt = NULL;
struct rpc_xprt_switch *xps;
- xps = xprt_switch_alloc(xprt, GFP_KERNEL);
- if (xps == NULL)
- return ERR_PTR(-ENOMEM);
-
+ if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
+ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ xps = args->bc_xprt->xpt_bc_xps;
+ xprt_switch_get(xps);
+ } else {
+ xps = xprt_switch_alloc(xprt, GFP_KERNEL);
+ if (xps == NULL) {
+ xprt_put(xprt);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (xprt->bc_xprt) {
+ xprt_switch_get(xps);
+ xprt->bc_xprt->xpt_bc_xps = xps;
+ }
+ }
clnt = rpc_new_client(args, xps, xprt, NULL);
if (IS_ERR(clnt))
return clnt;
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
return clnt;
}
-EXPORT_SYMBOL_GPL(rpc_create_xprt);
/**
* rpc_create - create an RPC client and transport with one call
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
};
char servername[48];
+ if (args->bc_xprt) {
+ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ xprt = args->bc_xprt->xpt_bc_xprt;
+ if (xprt) {
+ xprt_get(xprt);
+ return rpc_create_xprt(args, xprt);
+ }
+ }
+
if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index f5572e31d518..4f01f63102ee 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
/* See comment on corresponding get in xs_setup_bc_tcp(): */
if (xprt->xpt_bc_xprt)
xprt_put(xprt->xpt_bc_xprt);
+ if (xprt->xpt_bc_xps)
+ xprt_switch_put(xprt->xpt_bc_xps);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2d3e0c42361e..7e2b2fa189c3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
return xprt;
args->bc_xprt->xpt_bc_xprt = NULL;
+ args->bc_xprt->xpt_bc_xps = NULL;
xprt_put(xprt);
ret = ERR_PTR(-EINVAL);
out_err:
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 6f11c62bc8f9..a597708ae381 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -330,6 +330,21 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
return 0;
}
+/* tipc_bearer_reset_all - reset all links on all bearers
+ */
+void tipc_bearer_reset_all(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer *b;
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b = rcu_dereference_rtnl(tn->bearer_list[i]);
+ if (b)
+ tipc_reset_bearer(net, b);
+ }
+}
+
/**
* bearer_disable
*
@@ -405,7 +420,7 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
return 0;
/* Send RESET message even if bearer is detached from device */
- tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+ tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
goto drop;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index f686e41b5abb..60e49c3be19c 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -198,6 +198,7 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
struct tipc_media *tipc_media_find(const char *name);
+void tipc_bearer_reset_all(struct net *net);
int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7059c94f33c5..7d89f8713d49 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -349,6 +349,8 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
u16 ack = snd_l->snd_nxt - 1;
snd_l->ackers--;
+ rcv_l->bc_peer_is_up = true;
+ rcv_l->state = LINK_ESTABLISHED;
tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
tipc_link_reset(rcv_l);
rcv_l->state = LINK_RESET;
@@ -704,7 +706,8 @@ static void link_profile_stats(struct tipc_link *l)
*/
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
- int mtyp, rc = 0;
+ int mtyp = 0;
+ int rc = 0;
bool state = false;
bool probe = false;
bool setup = false;
@@ -1558,7 +1561,12 @@ void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
if (!msg_peer_node_is_up(hdr))
return;
- l->bc_peer_is_up = true;
+ /* Open when peer acknowledges our bcast init msg (pkt #1) */
+ if (msg_ack(hdr))
+ l->bc_peer_is_up = true;
+
+ if (!l->bc_peer_is_up)
+ return;
/* Ignore if peers_snd_nxt goes beyond receive window */
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8740930f0787..17201aa8423d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,6 +41,8 @@
#include "name_table.h"
#define MAX_FORWARD_SIZE 1024
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
+#define BUF_TAILROOM 16
static unsigned int align(unsigned int i)
{
@@ -505,6 +507,10 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
msg_set_hdr_sz(hdr, BASIC_H_SIZE);
}
+ if (skb_cloned(_skb) &&
+ pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+ goto exit;
+
/* Now reverse the concerned fields */
msg_set_errcode(hdr, err);
msg_set_origport(hdr, msg_destport(&ohdr));
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 024da8af91f0..7cf52fb39bee 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -94,17 +94,6 @@ struct plist;
#define TIPC_MEDIA_INFO_OFFSET 5
-/**
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- * are word aligned for quicker access
- */
-#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-
struct tipc_skb_cb {
void *handle;
struct sk_buff *tail;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 3ad9fab1985f..1fd464764765 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -604,7 +604,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
- nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+ nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
TIPC_MAX_LINK_NAME);
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index e01e2c71b5a1..23d4761842a0 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1297,10 +1297,6 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
rc = tipc_bcast_rcv(net, be->link, skb);
- /* Broadcast link reset may happen at reassembly failure */
- if (rc & TIPC_LINK_DOWN_EVT)
- tipc_node_reset_links(n);
-
/* Broadcast ACKs are sent on a unicast link */
if (rc & TIPC_LINK_SND_BC_ACK) {
tipc_node_read_lock(n);
@@ -1320,6 +1316,17 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
spin_unlock_bh(&be->inputq2.lock);
tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
}
+
+ if (rc & TIPC_LINK_DOWN_EVT) {
+ /* Reception reassembly failure => reset all links to peer */
+ if (!tipc_link_is_up(be->link))
+ tipc_node_reset_links(n);
+
+ /* Retransmission failure => reset all links to all peers */
+ if (!tipc_link_is_up(tipc_bc_sndlink(net)))
+ tipc_bearer_reset_all(net);
+ }
+
tipc_node_put(n);
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 88bfcd707064..c49b8df438cb 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -796,9 +796,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
* @tsk: receiving socket
* @skb: pointer to message buffer.
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
+ u32 onode = tsk_own_node(tsk);
struct tipc_msg *hdr = buf_msg(skb);
int mtyp = msg_type(hdr);
bool conn_cong;
@@ -811,7 +813,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
if (mtyp == CONN_PROBE) {
msg_set_type(hdr, CONN_PROBE_REPLY);
- tipc_sk_respond(sk, skb, TIPC_OK);
+ if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+ __skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
@@ -1686,7 +1689,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
*
* Returns true if message was added to socket receive queue, otherwise false
*/
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
@@ -1696,7 +1700,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
int usr = msg_user(hdr);
if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
- tipc_sk_proto_rcv(tsk, skb);
+ tipc_sk_proto_rcv(tsk, skb, xmitq);
return false;
}
@@ -1739,7 +1743,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
return true;
reject:
- tipc_sk_respond(sk, skb, err);
+ if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+ __skb_queue_tail(xmitq, skb);
return false;
}
@@ -1755,9 +1760,24 @@ reject:
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
unsigned int truesize = skb->truesize;
+ struct sk_buff_head xmitq;
+ u32 dnode, selector;
- if (likely(filter_rcv(sk, skb)))
+ __skb_queue_head_init(&xmitq);
+
+ if (likely(filter_rcv(sk, skb, &xmitq))) {
atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+ return 0;
+ }
+
+ if (skb_queue_empty(&xmitq))
+ return 0;
+
+ /* Send response/rejected message */
+ skb = __skb_dequeue(&xmitq);
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
return 0;
}
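After this change filter_rcv() never transmits by itself: any reversed response or rejection is parked on an on-stack queue and sent once filter_rcv() has returned. A minimal sketch of that deferred-reply pattern using the names from the hunk (the assumption being that transmitting from inside filter_rcv()'s locked context is exactly what the patch avoids):

	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	if (!filter_rcv(sk, skb, &xmitq) && !skb_queue_empty(&xmitq)) {
		/* tipc_msg_reverse() queued a response/rejection; send
		 * it now, addressed back to the original sender. */
		skb = __skb_dequeue(&xmitq);
		tipc_node_xmit_skb(sock_net(sk), skb,
				   msg_destnode(buf_msg(skb)),
				   msg_origport(buf_msg(skb)));
	}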
@@ -1771,12 +1791,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
- u32 dport)
+ u32 dport, struct sk_buff_head *xmitq)
{
+ unsigned long time_limit = jiffies + 2;
+ struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
- struct sk_buff *skb;
- unsigned long time_limit = jiffies + 2;
+ u32 onode;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1788,7 +1809,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- filter_rcv(sk, skb);
+ filter_rcv(sk, skb, xmitq);
continue;
}
@@ -1801,7 +1822,9 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
continue;
/* Overload => reject message back to sender */
- tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+ onode = tipc_own_addr(sock_net(sk));
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+ __skb_queue_tail(xmitq, skb);
break;
}
}
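tipc_sk_enqueue() drains the input queue while the socket spinlock is held, so the loop is bounded to roughly two jiffies; on receive-buffer overload the message is reversed in place and queued for a deferred rejection rather than sent on the spot. A sketch of that shape (per-skb handling elided):

	unsigned long time_limit = jiffies + 2;	/* bound work under the lock */

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;
		/* ... filter, queue to backlog, or reverse into xmitq ... */
	}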
@@ -1814,12 +1837,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
*/
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
+ struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;
+ __skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
@@ -1827,9 +1852,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
- tipc_sk_enqueue(inputq, sk, dport);
+ tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
+ /* Send pending response/rejected messages, if any */
+ while ((skb = __skb_dequeue(&xmitq))) {
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
+ }
sock_put(sk);
continue;
}
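In tipc_sk_rcv() the pending replies are drained only after sk_lock.slock has been dropped; the point of the refactor is that transmission never happens while the socket spinlock is held (the deadlock this prevents is inferred, not stated in the diff). Sketch of the drain step, with dport doubling as the link selector as in the hunk:

	spin_unlock_bh(&sk->sk_lock.slock);
	while ((skb = __skb_dequeue(&xmitq))) {
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}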
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 80aa6a3e6817..735362c26c8e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
- if (dentry && d_backing_inode(dentry) == i) {
+ if (dentry && d_real_inode(dentry) == i) {
sock_hold(s);
goto found;
}
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
- inode = d_backing_inode(path.dentry);
+ inode = d_real_inode(path.dentry);
err = inode_permission(inode, MAY_WRITE);
if (err)
goto put_fail;
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out_up;
}
addr->hash = UNIX_HASH_SIZE;
- hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
u->path = u_path;
list = &unix_socket_table[hash];
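All three af_unix sites switch from d_backing_inode() to d_real_inode() so that bind() and the connect()-side lookup hash and compare the same inode when the socket path sits on overlayfs. A hedged illustration; the overlayfs behaviour described in the comment is an assumption, not something the patch states:

	/* On ordinary filesystems both helpers return the same inode; on
	 * overlayfs d_backing_inode() can return the overlay inode while
	 * d_real_inode() returns the real underlying one, so a socket
	 * hashed with one helper would be invisible to a lookup using
	 * the other. */
	struct inode *bi = d_backing_inode(dentry);
	struct inode *ri = d_real_inode(dentry);

	if (bi != ri)
		pr_debug("af_unix: overlay inode %lu vs real inode %lu\n",
			 bi->i_ino, ri->i_ino);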
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index b5f1221f48d4..b96ac918e0ba 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -61,6 +61,14 @@
* function will also cleanup rejected sockets, those that reach the connected
* state but leave it before they have been accepted.
*
+ * - Lock ordering for pending or accept queue sockets is:
+ *
+ * lock_sock(listener);
+ * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
+ *
+ * Using explicit nested locking keeps lockdep happy since normally only one
+ * lock of a given class may be taken at a time.
+ *
* - Sockets created by user action will be cleaned up when the user process
* calls close(2), causing our release implementation to be called. Our release
* implementation will perform some cleanup then drop the last reference so our
@@ -443,7 +451,7 @@ void vsock_pending_work(struct work_struct *work)
cleanup = true;
lock_sock(listener);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
@@ -1292,7 +1300,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
if (connected) {
listener->sk_ack_backlog--;
- lock_sock(connected);
+ lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
vconnected = vsock_sk(connected);
/* If the listener socket has received an error, then we should
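Both call sites take the second socket lock with lock_sock_nested() so that lockdep treats the listener-then-pending/accepted ordering documented in the new comment block as intentional nesting within one lock class rather than a potential self-deadlock. A minimal sketch of the pattern (queue manipulation elided):

	lock_sock(listener);				/* outer lock         */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);	/* nested, same class */

	/* ... move sk on/off the listener's pending or accept queue ... */

	release_sock(sk);
	release_sock(listener);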
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7599014055d..7d72283901a3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3487,16 +3487,16 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params.smps_mode = NL80211_SMPS_OFF;
}
+ params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+ if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
+ return -EOPNOTSUPP;
+
if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
params.acl = parse_acl_data(&rdev->wiphy, info);
if (IS_ERR(params.acl))
return PTR_ERR(params.acl);
}
- params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
- if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
- return -EOPNOTSUPP;
-
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, &params);
if (!err) {
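The nl80211 hunk moves the cheap PBSS capability check ahead of parse_acl_data(): before the reorder, the early -EOPNOTSUPP return could fire after the ACL data had already been allocated, leaking params.acl (this leak reading is inferred from the reorder; the diff itself gives no rationale). The resulting validate-before-allocate shape:

	/* Checks that need no cleanup run first ... */
	params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
	if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
		return -EOPNOTSUPP;		/* nothing allocated yet */

	/* ... then the allocation, whose own failure path is clean. */
	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
		params.acl = parse_acl_data(&rdev->wiphy, info);
		if (IS_ERR(params.acl))
			return PTR_ERR(params.acl);
	}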
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4e809e978b7d..b7d1592bd5b8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
* replace EtherType */
hdrlen += ETH_ALEN + 2;
else
- tmp.h_proto = htons(skb->len);
+ tmp.h_proto = htons(skb->len - hdrlen);
pskb_pull(skb, hdrlen);
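For a frame without a SNAP header the synthesized 802.3 header carries a length field rather than an EtherType, and that length must cover only what remains after the 802.11 header is pulled. A worked example with illustrative numbers (not taken from the patch):

	/* Say hdrlen = 26 (802.11 header) and the payload is 60 bytes:
	 *   skb->len                      = 26 + 60 = 86
	 *   old: htons(skb->len)          -> length field 86 (wrong)
	 *   new: htons(skb->len - hdrlen) -> length field 60 (payload only)
	 * pskb_pull(skb, hdrlen) then leaves exactly those 60 bytes. */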
@@ -721,6 +721,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
* alignment since sizeof(struct ethhdr) is 14.
*/
frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+ if (!frame)
+ return NULL;
skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
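dev_alloc_skb() returns NULL under memory pressure, and calling skb_reserve() on a NULL pointer would crash; the added check turns that into an error return instead. A restatement of the guard in context, assuming (as the early return suggests) that callers of __ieee80211_amsdu_copy() treat NULL as an allocation failure:

	frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
	if (!frame)
		return NULL;	/* propagate the allocation failure */
	skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
	skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);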