Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig | 52
-rw-r--r--  net/ipv4/af_inet.c | 28
-rw-r--r--  net/ipv4/arp.c | 13
-rw-r--r--  net/ipv4/devinet.c | 12
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/fib_semantics.c | 5
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 42
-rw-r--r--  net/ipv4/inet_fragment.c | 1
-rw-r--r--  net/ipv4/inet_hashtables.c | 12
-rw-r--r--  net/ipv4/ip_gre.c | 136
-rw-r--r--  net/ipv4/ip_output.c | 6
-rw-r--r--  net/ipv4/ipconfig.c | 8
-rw-r--r--  net/ipv4/ipip.c | 7
-rw-r--r--  net/ipv4/ipmr.c | 464
-rw-r--r--  net/ipv4/netfilter/Kconfig | 30
-rw-r--r--  net/ipv4/netfilter/Makefile | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 155
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 138
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_TTL.c | 97
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_ttl.c | 63
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 1
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 1
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 1
-rw-r--r--  net/ipv4/netfilter/iptable_security.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 10
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 63
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 4
-rw-r--r--  net/ipv4/proc.c | 4
-rw-r--r--  net/ipv4/raw.c | 1
-rw-r--r--  net/ipv4/route.c | 15
-rw-r--r--  net/ipv4/tcp.c | 89
-rw-r--r--  net/ipv4/tcp_bic.c | 11
-rw-r--r--  net/ipv4/tcp_cong.c | 21
-rw-r--r--  net/ipv4/tcp_cubic.c | 11
-rw-r--r--  net/ipv4/tcp_htcp.c | 3
-rw-r--r--  net/ipv4/tcp_input.c | 198
-rw-r--r--  net/ipv4/tcp_ipv4.c | 11
-rw-r--r--  net/ipv4/tcp_minisocks.c | 9
-rw-r--r--  net/ipv4/tcp_output.c | 94
-rw-r--r--  net/ipv4/tcp_probe.c | 5
-rw-r--r--  net/ipv4/tcp_scalable.c | 10
-rw-r--r--  net/ipv4/tcp_timer.c | 23
-rw-r--r--  net/ipv4/tcp_veno.c | 7
-rw-r--r--  net/ipv4/tcp_yeah.c | 9
-rw-r--r--  net/ipv4/udp.c | 15
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
54 files changed, 1047 insertions, 864 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 691268f3a359..b2cf91e4ccaa 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -35,7 +35,7 @@ config IP_ADVANCED_ROUTER
at boot time after the /proc file system has been mounted.
- If you turn on IP forwarding, you will also get the rp_filter, which
+ If you turn on IP forwarding, you should consider the rp_filter, which
automatically rejects incoming packets if the routing table entry
for their source address doesn't match the network interface they're
arriving on. This has security advantages because it prevents the
@@ -46,12 +46,16 @@ config IP_ADVANCED_ROUTER
rp_filter on use:
echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
- or
+ and
echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
+ Note that some distributions enable it in startup scripts.
+ For details about rp_filter strict and loose mode read
+ <file:Documentation/networking/ip-sysctl.txt>.
+
If unsure, say N here.
-choice
+choice
prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
depends on IP_ADVANCED_ROUTER
default ASK_IP_FIB_HASH
@@ -59,27 +63,29 @@ choice
config ASK_IP_FIB_HASH
bool "FIB_HASH"
---help---
- Current FIB is very proven and good enough for most users.
+ Current FIB is very proven and good enough for most users.
config IP_FIB_TRIE
bool "FIB_TRIE"
---help---
- Use new experimental LC-trie as FIB lookup algorithm.
- This improves lookup performance if you have a large
- number of routes.
-
- LC-trie is a longest matching prefix lookup algorithm which
- performs better than FIB_HASH for large routing tables.
- But, it consumes more memory and is more complex.
-
- LC-trie is described in:
-
- IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
- IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
- An experimental study of compression methods for dynamic tries
- Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
- http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
-
+ Use new experimental LC-trie as FIB lookup algorithm.
+ This improves lookup performance if you have a large
+ number of routes.
+
+ LC-trie is a longest matching prefix lookup algorithm which
+ performs better than FIB_HASH for large routing tables.
+ But, it consumes more memory and is more complex.
+
+ LC-trie is described in:
+
+ IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
+ IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
+ June 1999
+
+ An experimental study of compression methods for dynamic tries
+ Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
+ http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
+
endchoice
config IP_FIB_HASH
@@ -191,7 +197,7 @@ config IP_PNP_RARP
<file:Documentation/filesystems/nfsroot.txt> for details.
# not yet ready..
-# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
+# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
config NET_IPIP
tristate "IP: tunneling"
select INET_TUNNEL
@@ -361,7 +367,7 @@ config INET_IPCOMP
---help---
Support for IP Payload Compression Protocol (IPComp) (RFC3173),
typically needed for IPsec.
-
+
If unsure, say Y.
config INET_XFRM_TUNNEL
@@ -415,7 +421,7 @@ config INET_DIAG
Support for INET (TCP, DCCP, etc) socket monitoring interface used by
native Linux tools such as ss. ss is included in iproute2, currently
downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
-
+
If unsure, say Y.
config INET_TCP_DIAG
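
[Editorial sketch, not part of the patch.] The FIB_TRIE help text above describes a longest-matching-prefix lookup. A plain-C illustration of what that lookup computes, using a linear scan over invented routes (the kernel option implements the same semantics with a level-compressed trie):

#include <stdint.h>
#include <stdio.h>

struct route { uint32_t prefix; int plen; const char *nh; };

static const struct route tbl[] = {
	{ 0x0A000000,  8, "via 192.0.2.1" },	/* 10.0.0.0/8 */
	{ 0x0A010000, 16, "via 192.0.2.2" },	/* 10.1.0.0/16 */
	{ 0x00000000,  0, "default route" },	/* 0.0.0.0/0 */
};

static const char *lpm_lookup(uint32_t dst)
{
	const struct route *best = NULL;
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		uint32_t mask = tbl[i].plen ? ~0u << (32 - tbl[i].plen) : 0;

		if ((dst & mask) == tbl[i].prefix &&
		    (!best || tbl[i].plen > best->plen))
			best = &tbl[i];	/* longer prefix wins */
	}
	return best ? best->nh : "unreachable";
}

int main(void)
{
	printf("%s\n", lpm_lookup(0x0A010203));	/* 10.1.2.3 -> the /16 entry */
	return 0;
}
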
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 743f5542d65a..d5aaabbb7cb3 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -369,7 +369,6 @@ lookup_protocol:
sock_init_data(sock, sk);
sk->sk_destruct = inet_sock_destruct;
- sk->sk_family = PF_INET;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
@@ -1253,10 +1252,10 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
int proto;
int id;
- if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ iph = skb_gro_header(skb, sizeof(*iph));
+ if (unlikely(!iph))
goto out;
- iph = ip_hdr(skb);
proto = iph->protocol & (MAX_INET_PROTOS - 1);
rcu_read_lock();
@@ -1264,13 +1263,13 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
if (!ops || !ops->gro_receive)
goto out_unlock;
- if (iph->version != 4 || iph->ihl != 5)
+ if (*(u8 *)iph != 0x45)
goto out_unlock;
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto out_unlock;
- flush = ntohs(iph->tot_len) != skb->len ||
+ flush = ntohs(iph->tot_len) != skb_gro_len(skb) ||
iph->frag_off != htons(IP_DF);
id = ntohs(iph->id);
@@ -1282,24 +1281,25 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
iph2 = ip_hdr(p);
- if (iph->protocol != iph2->protocol ||
- iph->tos != iph2->tos ||
- memcmp(&iph->saddr, &iph2->saddr, 8)) {
+ if ((iph->protocol ^ iph2->protocol) |
+ (iph->tos ^ iph2->tos) |
+ (iph->saddr ^ iph2->saddr) |
+ (iph->daddr ^ iph2->daddr)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
/* All fields must match except length and checksum. */
NAPI_GRO_CB(p)->flush |=
- memcmp(&iph->frag_off, &iph2->frag_off, 4) ||
- (u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) != id;
+ (iph->ttl ^ iph2->ttl) |
+ ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
NAPI_GRO_CB(p)->flush |= flush;
}
NAPI_GRO_CB(skb)->flush |= flush;
- __skb_pull(skb, sizeof(*iph));
- skb_reset_transport_header(skb);
+ skb_gro_pull(skb, sizeof(*iph));
+ skb_set_transport_header(skb, skb_gro_offset(skb));
pp = ops->gro_receive(head, skb);
@@ -1500,8 +1500,8 @@ static int ipv4_proc_init(void);
* IP protocol layer initialiser
*/
-static struct packet_type ip_packet_type = {
- .type = __constant_htons(ETH_P_IP),
+static struct packet_type ip_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_IP),
.func = ip_rcv,
.gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
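
[Editorial sketch, not part of the patch.] The rewritten GRO flow comparison above replaces a branchy chain of != tests and a memcmp() with XORs folded together by OR: XOR is zero only for equal fields, so a single test decides whether any header field differs. A standalone restatement with a made-up key structure:

#include <stdint.h>
#include <stdio.h>

struct flow_key {
	uint8_t  protocol, tos;
	uint32_t saddr, daddr;
};

static int same_flow(const struct flow_key *a, const struct flow_key *b)
{
	/* the OR of the XORs is non-zero as soon as any field differs */
	return !((a->protocol ^ b->protocol) |
		 (a->tos      ^ b->tos)      |
		 (a->saddr    ^ b->saddr)    |
		 (a->daddr    ^ b->daddr));
}

int main(void)
{
	struct flow_key a = { 6, 0, 0x0A000001, 0x0A000002 };
	struct flow_key b = a;

	printf("same: %d\n", same_flow(&a, &b));	/* 1 */
	b.tos = 4;
	printf("same: %d\n", same_flow(&a, &b));	/* 0 */
	return 0;
}
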
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 29a74c01d8de..f11931c18381 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -801,8 +801,11 @@ static int arp_process(struct sk_buff *skb)
* cache.
*/
- /* Special case: IPv4 duplicate address detection packet (RFC2131) */
- if (sip == 0) {
+ /*
+ * Special case: IPv4 duplicate address detection packet (RFC2131)
+ * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
+ */
+ if (sip == 0 || tip == sip) {
if (arp->ar_op == htons(ARPOP_REQUEST) &&
inet_addr_type(net, tip) == RTN_LOCAL &&
!arp_ignore(in_dev, sip, tip))
@@ -892,7 +895,7 @@ static int arp_process(struct sk_buff *skb)
out:
if (in_dev)
in_dev_put(in_dev);
- kfree_skb(skb);
+ consume_skb(skb);
return 0;
}
@@ -1225,8 +1228,8 @@ void arp_ifdown(struct net_device *dev)
* Called once on startup.
*/
-static struct packet_type arp_packet_type = {
- .type = __constant_htons(ETH_P_ARP),
+static struct packet_type arp_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_ARP),
.func = arp_rcv,
};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 309997edc8a5..126bb911880f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1075,6 +1075,14 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
}
}
ip_mc_up(in_dev);
+ /* fall through */
+ case NETDEV_CHANGEADDR:
+ if (IN_DEV_ARP_NOTIFY(in_dev))
+ arp_send(ARPOP_REQUEST, ETH_P_ARP,
+ in_dev->ifa_list->ifa_address,
+ dev,
+ in_dev->ifa_list->ifa_address,
+ NULL, dev->dev_addr, NULL);
break;
case NETDEV_DOWN:
ip_mc_down(in_dev);
@@ -1208,7 +1216,8 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
kfree_skb(skb);
goto errout;
}
- err = rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
+ rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
+ return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
@@ -1439,6 +1448,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
+ DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
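
[Editorial sketch, not part of the patch.] Taken together, the arp.c hunk earlier accepts gratuitous ARP (sender IP equal to target IP) and this devinet.c hunk emits one when the new arp_notify sysctl is set. A minimal classifier for the two special-case packets involved, with addresses as host-order integers purely for illustration:

#include <stdint.h>
#include <stdio.h>

enum arp_kind { ARP_ORDINARY, ARP_DAD_PROBE, ARP_GRATUITOUS };

static enum arp_kind classify(uint32_t sip, uint32_t tip)
{
	if (sip == 0)
		return ARP_DAD_PROBE;	/* "is anyone using tip?" (RFC 2131) */
	if (sip == tip)
		return ARP_GRATUITOUS;	/* "I am (still) at this address" (RFC 3927 2.4) */
	return ARP_ORDINARY;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(0, 0x0A000001),			/* DAD probe */
	       classify(0x0A000001, 0x0A000001),	/* gratuitous announce */
	       classify(0x0A000001, 0x0A000002));	/* normal request */
	return 0;
}
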
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 741e4fa3e474..cafcc49d0993 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -275,7 +275,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
fib_res_put(&res);
if (no_addr)
goto last_resort;
- if (rpf)
+ if (rpf == 1)
goto e_inval;
fl.oif = dev->ifindex;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 4817dea3bc73..f831df500907 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -322,8 +322,9 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
kfree_skb(skb);
goto errout;
}
- err = rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
- info->nlh, GFP_KERNEL);
+ rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
+ info->nlh, GFP_KERNEL);
+ return;
errout:
if (err < 0)
rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fc562d29cc46..3f50807237e0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -375,6 +375,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
+ ipc.shtx.flags = 0;
if (icmp_param->replyopts.optlen) {
ipc.opt = &icmp_param->replyopts;
if (ipc.opt->srr)
@@ -532,6 +533,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
inet_sk(sk)->tos = tos;
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts;
+ ipc.shtx.flags = 0;
{
struct flowi fl = {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f26ab38680de..22cd19ee44e5 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -93,24 +93,40 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
struct inet_bind_hashbucket *head;
struct hlist_node *node;
struct inet_bind_bucket *tb;
- int ret;
+ int ret, attempts = 5;
struct net *net = sock_net(sk);
+ int smallest_size = -1, smallest_rover;
local_bh_disable();
if (!snum) {
int remaining, rover, low, high;
+again:
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
- rover = net_random() % remaining + low;
+ smallest_rover = rover = net_random() % remaining + low;
+ smallest_size = -1;
do {
head = &hashinfo->bhash[inet_bhashfn(net, rover,
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
- if (ib_net(tb) == net && tb->port == rover)
+ if (ib_net(tb) == net && tb->port == rover) {
+ if (tb->fastreuse > 0 &&
+ sk->sk_reuse &&
+ sk->sk_state != TCP_LISTEN &&
+ (tb->num_owners < smallest_size || smallest_size == -1)) {
+ smallest_size = tb->num_owners;
+ smallest_rover = rover;
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+ spin_unlock(&head->lock);
+ snum = smallest_rover;
+ goto have_snum;
+ }
+ }
goto next;
+ }
break;
next:
spin_unlock(&head->lock);
@@ -125,14 +141,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
* the top level, not from the 'break;' statement.
*/
ret = 1;
- if (remaining <= 0)
+ if (remaining <= 0) {
+ if (smallest_size != -1) {
+ snum = smallest_rover;
+ goto have_snum;
+ }
goto fail;
-
+ }
/* OK, here is the one we will use. HEAD is
* non-NULL and we hold it's mutex.
*/
snum = rover;
} else {
+have_snum:
head = &hashinfo->bhash[inet_bhashfn(net, snum,
hashinfo->bhash_size)];
spin_lock(&head->lock);
@@ -145,12 +166,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
tb_found:
if (!hlist_empty(&tb->owners)) {
if (tb->fastreuse > 0 &&
- sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
+ sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
+ smallest_size == -1) {
goto success;
} else {
ret = 1;
- if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb))
+ if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
+ smallest_size != -1 && --attempts >= 0) {
+ spin_unlock(&head->lock);
+ goto again;
+ }
goto fail_unlock;
+ }
}
}
tb_not_found:
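
[Editorial sketch, not part of the patch.] The inet_csk_get_port() changes above let an automatic bind fall back to sharing the reusable in-use port with the fewest current owners once the ephemeral range is exhausted. A toy restatement of that selection rule, with all names and numbers invented and the kernel's retry-on-conflict loop omitted:

#include <stdio.h>

#define LOW    32768
#define NPORTS 4			/* tiny range to force the fallback */

static int owners[NPORTS]   = { 2, 1, 3, 5 };	/* sockets bound per port */
static int reusable[NPORTS] = { 1, 1, 1, 0 };	/* fastreuse-style flag */

static int pick_port(void)
{
	int i, best = -1, best_owners = -1;

	for (i = 0; i < NPORTS; i++) {
		if (owners[i] == 0)
			return LOW + i;		/* a free port wins outright */
		if (reusable[i] &&
		    (best_owners == -1 || owners[i] < best_owners)) {
			best = LOW + i;		/* least-shared reusable port */
			best_owners = owners[i];
		}
	}
	return best;		/* -1 when nothing is free or shareable */
}

int main(void)
{
	printf("picked port %d\n", pick_port());	/* 32769 here */
	return 0;
}
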
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 6c52e08f786e..eaf3e2c8646a 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -267,6 +267,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
struct inet_frags *f, void *key, unsigned int hash)
+ __releases(&f->lock)
{
struct inet_frag_queue *q;
struct hlist_node *n;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 6a1045da48d2..625cc5f64c94 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -38,6 +38,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
write_pnet(&tb->ib_net, hold_net(net));
tb->port = snum;
tb->fastreuse = 0;
+ tb->num_owners = 0;
INIT_HLIST_HEAD(&tb->owners);
hlist_add_head(&tb->node, &head->chain);
}
@@ -59,8 +60,13 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum)
{
+ struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+
+ atomic_inc(&hashinfo->bsockets);
+
inet_sk(sk)->num = snum;
sk_add_bind_node(sk, &tb->owners);
+ tb->num_owners++;
inet_csk(sk)->icsk_bind_hash = tb;
}
@@ -75,9 +81,12 @@ static void __inet_put_port(struct sock *sk)
struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
struct inet_bind_bucket *tb;
+ atomic_dec(&hashinfo->bsockets);
+
spin_lock(&head->lock);
tb = inet_csk(sk)->icsk_bind_hash;
__sk_del_bind_node(sk);
+ tb->num_owners--;
inet_csk(sk)->icsk_bind_hash = NULL;
inet_sk(sk)->num = 0;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
@@ -444,9 +453,9 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
*/
inet_bind_bucket_for_each(tb, node, &head->chain) {
if (ib_net(tb) == net && tb->port == port) {
- WARN_ON(hlist_empty(&tb->owners));
if (tb->fastreuse >= 0)
goto next_port;
+ WARN_ON(hlist_empty(&tb->owners));
if (!check_established(death_row, sk,
port, &tw))
goto ok;
@@ -523,6 +532,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
{
int i;
+ atomic_set(&h->bsockets, 0);
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 0101521f366b..e62510d5ea5a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -164,67 +164,124 @@ static DEFINE_RWLOCK(ipgre_lock);
/* Given src, dst and key, find appropriate for input tunnel. */
-static struct ip_tunnel * ipgre_tunnel_lookup(struct net *net,
+static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
__be32 remote, __be32 local,
__be32 key, __be16 gre_proto)
{
+ struct net *net = dev_net(dev);
+ int link = dev->ifindex;
unsigned h0 = HASH(remote);
unsigned h1 = HASH(key);
- struct ip_tunnel *t;
- struct ip_tunnel *t2 = NULL;
+ struct ip_tunnel *t, *cand = NULL;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
ARPHRD_ETHER : ARPHRD_IPGRE;
+ int score, cand_score = 4;
for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
- if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
- if (t->parms.i_key == key && t->dev->flags & IFF_UP) {
- if (t->dev->type == dev_type)
- return t;
- if (t->dev->type == ARPHRD_IPGRE && !t2)
- t2 = t;
- }
+ if (local != t->parms.iph.saddr ||
+ remote != t->parms.iph.daddr ||
+ key != t->parms.i_key ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+ if (t->dev->type != ARPHRD_IPGRE &&
+ t->dev->type != dev_type)
+ continue;
+
+ score = 0;
+ if (t->parms.link != link)
+ score |= 1;
+ if (t->dev->type != dev_type)
+ score |= 2;
+ if (score == 0)
+ return t;
+
+ if (score < cand_score) {
+ cand = t;
+ cand_score = score;
}
}
for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
- if (remote == t->parms.iph.daddr) {
- if (t->parms.i_key == key && t->dev->flags & IFF_UP) {
- if (t->dev->type == dev_type)
- return t;
- if (t->dev->type == ARPHRD_IPGRE && !t2)
- t2 = t;
- }
+ if (remote != t->parms.iph.daddr ||
+ key != t->parms.i_key ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+ if (t->dev->type != ARPHRD_IPGRE &&
+ t->dev->type != dev_type)
+ continue;
+
+ score = 0;
+ if (t->parms.link != link)
+ score |= 1;
+ if (t->dev->type != dev_type)
+ score |= 2;
+ if (score == 0)
+ return t;
+
+ if (score < cand_score) {
+ cand = t;
+ cand_score = score;
}
}
for (t = ign->tunnels_l[h1]; t; t = t->next) {
- if (local == t->parms.iph.saddr ||
- (local == t->parms.iph.daddr &&
- ipv4_is_multicast(local))) {
- if (t->parms.i_key == key && t->dev->flags & IFF_UP) {
- if (t->dev->type == dev_type)
- return t;
- if (t->dev->type == ARPHRD_IPGRE && !t2)
- t2 = t;
- }
+ if ((local != t->parms.iph.saddr &&
+ (local != t->parms.iph.daddr ||
+ !ipv4_is_multicast(local))) ||
+ key != t->parms.i_key ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+ if (t->dev->type != ARPHRD_IPGRE &&
+ t->dev->type != dev_type)
+ continue;
+
+ score = 0;
+ if (t->parms.link != link)
+ score |= 1;
+ if (t->dev->type != dev_type)
+ score |= 2;
+ if (score == 0)
+ return t;
+
+ if (score < cand_score) {
+ cand = t;
+ cand_score = score;
}
}
for (t = ign->tunnels_wc[h1]; t; t = t->next) {
- if (t->parms.i_key == key && t->dev->flags & IFF_UP) {
- if (t->dev->type == dev_type)
- return t;
- if (t->dev->type == ARPHRD_IPGRE && !t2)
- t2 = t;
+ if (t->parms.i_key != key ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+ if (t->dev->type != ARPHRD_IPGRE &&
+ t->dev->type != dev_type)
+ continue;
+
+ score = 0;
+ if (t->parms.link != link)
+ score |= 1;
+ if (t->dev->type != dev_type)
+ score |= 2;
+ if (score == 0)
+ return t;
+
+ if (score < cand_score) {
+ cand = t;
+ cand_score = score;
}
}
- if (t2)
- return t2;
+ if (cand != NULL)
+ return cand;
- if (ign->fb_tunnel_dev->flags&IFF_UP)
+ if (ign->fb_tunnel_dev->flags & IFF_UP)
return netdev_priv(ign->fb_tunnel_dev);
+
return NULL;
}
@@ -284,6 +341,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
+ int link = parms->link;
struct ip_tunnel *t, **tp;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
@@ -291,6 +349,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
key == t->parms.i_key &&
+ link == t->parms.link &&
type == t->dev->type)
break;
@@ -421,7 +480,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
}
read_lock(&ipgre_lock);
- t = ipgre_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr,
+ t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
flags & GRE_KEY ?
*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
p[1]);
@@ -432,7 +491,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
- if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
+ if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
@@ -518,7 +577,7 @@ static int ipgre_rcv(struct sk_buff *skb)
gre_proto = *(__be16 *)(h + 2);
read_lock(&ipgre_lock);
- if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
+ if ((tunnel = ipgre_tunnel_lookup(skb->dev,
iph->saddr, iph->daddr, key,
gre_proto))) {
struct net_device_stats *stats = &tunnel->dev->stats;
@@ -744,7 +803,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
if (tunnel->err_count > 0) {
- if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
+ if (time_before(jiffies,
+ tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
tunnel->err_count--;
dst_link_failure(skb);
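
[Editorial sketch, not part of the patch.] The four lookup loops in the ip_gre.c hunk above all apply the same scoring rule; this is a condensed, self-contained restatement with invented tunnel data. A mismatching link costs 1, a mismatching device type costs 2, score 0 is a perfect match and returns immediately, and otherwise the lowest score seen so far is kept as the candidate:

#include <stdio.h>

struct tunnel { const char *name; int link; int type; };

static struct tunnel tunnels[] = {
	{ "gre1", 2, 1 },	/* right type, wrong link -> score 1 */
	{ "gre2", 3, 0 },	/* wrong type, right link -> score 2 */
	{ "gre3", 3, 1 },	/* both match             -> score 0 */
};

static struct tunnel *lookup(int link, int type)
{
	struct tunnel *cand = NULL;
	int cand_score = 4;	/* worse than any real score (0..3) */
	size_t i;

	for (i = 0; i < sizeof(tunnels) / sizeof(tunnels[0]); i++) {
		struct tunnel *t = &tunnels[i];
		int score = 0;

		if (t->link != link)
			score |= 1;
		if (t->type != type)
			score |= 2;
		if (score == 0)
			return t;	/* exact match: stop searching */
		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}
	return cand;	/* best partial match, or NULL */
}

int main(void)
{
	printf("%s\n", lookup(3, 1)->name);	/* gre3: exact match */
	printf("%s\n", lookup(9, 1)->name);	/* gre1: link mismatch beats type mismatch */
	return 0;
}
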
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8ebe86dd72af..3e7e910c7c0f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -935,6 +935,10 @@ alloc_new_skb:
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
+ else
+ /* only the initial fragment is
+ time stamped */
+ ipc->shtx.flags = 0;
}
if (skb == NULL)
goto error;
@@ -945,6 +949,7 @@ alloc_new_skb:
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
+ *skb_tx(skb) = ipc->shtx;
/*
* Find where to start putting bytes.
@@ -1364,6 +1369,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
+ ipc.shtx.flags = 0;
if (replyopts.opt.optlen) {
ipc.opt = &replyopts.opt;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index d722013c1cae..90d22ae0a419 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -100,8 +100,8 @@
#define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers
- '3' from resolv.h */
-#define NONE __constant_htonl(INADDR_NONE)
-#define ANY __constant_htonl(INADDR_ANY)
+#define NONE cpu_to_be32(INADDR_NONE)
+#define ANY cpu_to_be32(INADDR_ANY)
/*
* Public IP configuration
@@ -406,7 +406,7 @@ static int __init ic_defaults(void)
static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
static struct packet_type rarp_packet_type __initdata = {
- .type = __constant_htons(ETH_P_RARP),
+ .type = cpu_to_be16(ETH_P_RARP),
.func = ic_rarp_recv,
};
@@ -568,7 +568,7 @@ struct bootp_pkt { /* BOOTP packet format */
static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
static struct packet_type bootp_packet_type __initdata = {
- .type = __constant_htons(ETH_P_IP),
+ .type = cpu_to_be16(ETH_P_IP),
.func = ic_bootp_recv,
};
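
[Editorial sketch, not part of the patch.] The __constant_htons() to cpu_to_be16() conversions in this series are cosmetic: both fold to a compile-time byte swap, so either is usable in a static initializer. A freestanding illustration of the idea; the macro below is invented for the demo (the kernel's own versions live under include/linux/byteorder/ and handle both endiannesses):

#include <stdint.h>
#include <stdio.h>

/* little-endian host assumed; swaps the two bytes at compile time */
#define CPU_TO_BE16(x) ((uint16_t)(((x) >> 8) | ((x) << 8)))

struct packet_type { uint16_t type; };

static struct packet_type ip_packet_type = {
	.type = CPU_TO_BE16(0x0800),	/* ETH_P_IP, constant-folded */
};

int main(void)
{
	printf("0x%04x\n", ip_packet_type.type);	/* 0x0008 on little-endian */
	return 0;
}
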
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 5079dfbc6f38..9054139795af 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -327,7 +327,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
- if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
+ if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
@@ -466,7 +466,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (tunnel->err_count > 0) {
- if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
+ if (time_before(jiffies,
+ tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
tunnel->err_count--;
dst_link_failure(skb);
} else
@@ -750,7 +751,7 @@ static struct xfrm_tunnel ipip_handler = {
.priority = 1,
};
-static char banner[] __initdata =
+static const char banner[] __initconst =
KERN_INFO "IPv4 over IPv4 tunneling driver\n";
static void ipip_destroy_tunnels(struct ipip_net *ipn)
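
[Editorial sketch, not part of the patch.] The ipip.c hunks above (like the ip_gre.c ones earlier) switch open-coded jiffies arithmetic to the time_before() idiom, which stays correct when the tick counter wraps. A userspace re-implementation for a 32-bit counter (the kernel macro uses unsigned long), showing a deadline comparison that the naive form gets wrong across the wrap:

#include <stdint.h>
#include <stdio.h>

/* same shape as the kernel macro, narrowed to uint32_t for the demo */
#define time_before(a, b)  ((int32_t)((a) - (b)) < 0)

int main(void)
{
	uint32_t err_time = 0xFFFFFFB0u;	/* last error, just before wrap */
	uint32_t timeo    = 0x40;		/* error-rate window in ticks */
	uint32_t now      = 0x10;		/* counter wrapped; 0x60 ticks later */

	/* 0x60 ticks have elapsed, so the 0x40-tick window is over */
	printf("naive:       %d\n", now < err_time + timeo);		  /* 1 (wrong) */
	printf("time_before: %d\n", time_before(now, err_time + timeo)); /* 0 (right) */
	return 0;
}
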
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 14666449dc1c..13e9dd3012b3 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -67,9 +67,6 @@
#define CONFIG_IP_PIMSM 1
#endif
-static struct sock *mroute_socket;
-
-
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.
*/
@@ -80,18 +77,9 @@ static DEFINE_RWLOCK(mrt_lock);
* Multicast router control variables
*/
-static struct vif_device vif_table[MAXVIFS]; /* Devices */
-static int maxvif;
-
-#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
-
-static int mroute_do_assert; /* Set in PIM assert */
-static int mroute_do_pim;
-
-static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
+#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)
static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
-static atomic_t cache_resolve_queue_len; /* Size of unresolved */
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -107,7 +95,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
-static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
+static int ipmr_cache_report(struct net *net,
+ struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
#ifdef CONFIG_IP_PIMSM_V2
@@ -120,9 +109,11 @@ static struct timer_list ipmr_expire_timer;
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
+ struct net *net = dev_net(dev);
+
dev_close(dev);
- dev = __dev_get_by_name(&init_net, "tunl0");
+ dev = __dev_get_by_name(net, "tunl0");
if (dev) {
const struct net_device_ops *ops = dev->netdev_ops;
struct ifreq ifr;
@@ -148,11 +139,11 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
}
static
-struct net_device *ipmr_new_tunnel(struct vifctl *v)
+struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
struct net_device *dev;
- dev = __dev_get_by_name(&init_net, "tunl0");
+ dev = __dev_get_by_name(net, "tunl0");
if (dev) {
const struct net_device_ops *ops = dev->netdev_ops;
@@ -181,7 +172,8 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
dev = NULL;
- if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
+ if (err == 0 &&
+ (dev = __dev_get_by_name(net, p.name)) != NULL) {
dev->flags |= IFF_MULTICAST;
in_dev = __in_dev_get_rtnl(dev);
@@ -209,14 +201,15 @@ failure:
#ifdef CONFIG_IP_PIMSM
-static int reg_vif_num = -1;
-
static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct net *net = dev_net(dev);
+
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
- ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
+ ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
+ IGMPMSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
return 0;
@@ -283,16 +276,16 @@ failure:
* @notify: Set to 1, if the caller is a notifier_call
*/
-static int vif_delete(int vifi, int notify)
+static int vif_delete(struct net *net, int vifi, int notify)
{
struct vif_device *v;
struct net_device *dev;
struct in_device *in_dev;
- if (vifi < 0 || vifi >= maxvif)
+ if (vifi < 0 || vifi >= net->ipv4.maxvif)
return -EADDRNOTAVAIL;
- v = &vif_table[vifi];
+ v = &net->ipv4.vif_table[vifi];
write_lock_bh(&mrt_lock);
dev = v->dev;
@@ -304,17 +297,17 @@ static int vif_delete(int vifi, int notify)
}
#ifdef CONFIG_IP_PIMSM
- if (vifi == reg_vif_num)
- reg_vif_num = -1;
+ if (vifi == net->ipv4.mroute_reg_vif_num)
+ net->ipv4.mroute_reg_vif_num = -1;
#endif
- if (vifi+1 == maxvif) {
+ if (vifi+1 == net->ipv4.maxvif) {
int tmp;
for (tmp=vifi-1; tmp>=0; tmp--) {
- if (VIF_EXISTS(tmp))
+ if (VIF_EXISTS(net, tmp))
break;
}
- maxvif = tmp+1;
+ net->ipv4.maxvif = tmp+1;
}
write_unlock_bh(&mrt_lock);
@@ -333,6 +326,12 @@ static int vif_delete(int vifi, int notify)
return 0;
}
+static inline void ipmr_cache_free(struct mfc_cache *c)
+{
+ release_net(mfc_net(c));
+ kmem_cache_free(mrt_cachep, c);
+}
+
/* Destroy an unresolved cache entry, killing queued skbs
and reporting error to netlink readers.
*/
@@ -341,8 +340,9 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
{
struct sk_buff *skb;
struct nlmsgerr *e;
+ struct net *net = mfc_net(c);
- atomic_dec(&cache_resolve_queue_len);
+ atomic_dec(&net->ipv4.cache_resolve_queue_len);
while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
@@ -354,12 +354,12 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
e->error = -ETIMEDOUT;
memset(&e->msg, 0, sizeof(e->msg));
- rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
+ rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
} else
kfree_skb(skb);
}
- kmem_cache_free(mrt_cachep, c);
+ ipmr_cache_free(c);
}
@@ -376,7 +376,7 @@ static void ipmr_expire_process(unsigned long dummy)
return;
}
- if (atomic_read(&cache_resolve_queue_len) == 0)
+ if (mfc_unres_queue == NULL)
goto out;
now = jiffies;
@@ -397,7 +397,7 @@ static void ipmr_expire_process(unsigned long dummy)
ipmr_destroy_unres(c);
}
- if (atomic_read(&cache_resolve_queue_len))
+ if (mfc_unres_queue != NULL)
mod_timer(&ipmr_expire_timer, jiffies + expires);
out:
@@ -409,13 +409,15 @@ out:
static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
int vifi;
+ struct net *net = mfc_net(cache);
cache->mfc_un.res.minvif = MAXVIFS;
cache->mfc_un.res.maxvif = 0;
memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
- for (vifi=0; vifi<maxvif; vifi++) {
- if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
+ for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
+ if (VIF_EXISTS(net, vifi) &&
+ ttls[vifi] && ttls[vifi] < 255) {
cache->mfc_un.res.ttls[vifi] = ttls[vifi];
if (cache->mfc_un.res.minvif > vifi)
cache->mfc_un.res.minvif = vifi;
@@ -425,16 +427,16 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
}
}
-static int vif_add(struct vifctl *vifc, int mrtsock)
+static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
{
int vifi = vifc->vifc_vifi;
- struct vif_device *v = &vif_table[vifi];
+ struct vif_device *v = &net->ipv4.vif_table[vifi];
struct net_device *dev;
struct in_device *in_dev;
int err;
/* Is vif busy ? */
- if (VIF_EXISTS(vifi))
+ if (VIF_EXISTS(net, vifi))
return -EADDRINUSE;
switch (vifc->vifc_flags) {
@@ -444,7 +446,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
* Special Purpose VIF in PIM
* All the packets will be sent to the daemon
*/
- if (reg_vif_num >= 0)
+ if (net->ipv4.mroute_reg_vif_num >= 0)
return -EADDRINUSE;
dev = ipmr_reg_vif();
if (!dev)
@@ -458,7 +460,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
break;
#endif
case VIFF_TUNNEL:
- dev = ipmr_new_tunnel(vifc);
+ dev = ipmr_new_tunnel(net, vifc);
if (!dev)
return -ENOBUFS;
err = dev_set_allmulti(dev, 1);
@@ -469,7 +471,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
}
break;
case 0:
- dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
+ dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
if (!dev)
return -EADDRNOTAVAIL;
err = dev_set_allmulti(dev, 1);
@@ -510,20 +512,22 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
v->dev = dev;
#ifdef CONFIG_IP_PIMSM
if (v->flags&VIFF_REGISTER)
- reg_vif_num = vifi;
+ net->ipv4.mroute_reg_vif_num = vifi;
#endif
- if (vifi+1 > maxvif)
- maxvif = vifi+1;
+ if (vifi+1 > net->ipv4.maxvif)
+ net->ipv4.maxvif = vifi+1;
write_unlock_bh(&mrt_lock);
return 0;
}
-static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
+static struct mfc_cache *ipmr_cache_find(struct net *net,
+ __be32 origin,
+ __be32 mcastgrp)
{
int line = MFC_HASH(mcastgrp, origin);
struct mfc_cache *c;
- for (c=mfc_cache_array[line]; c; c = c->next) {
+ for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) {
if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
break;
}
@@ -533,22 +537,24 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
/*
* Allocate a multicast cache entry
*/
-static struct mfc_cache *ipmr_cache_alloc(void)
+static struct mfc_cache *ipmr_cache_alloc(struct net *net)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (c == NULL)
return NULL;
c->mfc_un.res.minvif = MAXVIFS;
+ mfc_net_set(c, net);
return c;
}
-static struct mfc_cache *ipmr_cache_alloc_unres(void)
+static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
if (c == NULL)
return NULL;
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10*HZ;
+ mfc_net_set(c, net);
return c;
}
@@ -581,7 +587,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
memset(&e->msg, 0, sizeof(e->msg));
}
- rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
+ rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
} else
ip_mr_forward(skb, c, 0);
}
@@ -594,7 +600,8 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
* Called under mrt_lock.
*/
-static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
+static int ipmr_cache_report(struct net *net,
+ struct sk_buff *pkt, vifi_t vifi, int assert)
{
struct sk_buff *skb;
const int ihl = ip_hdrlen(pkt);
@@ -626,7 +633,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
msg->im_msgtype = IGMPMSG_WHOLEPKT;
msg->im_mbz = 0;
- msg->im_vif = reg_vif_num;
+ msg->im_vif = net->ipv4.mroute_reg_vif_num;
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
sizeof(struct iphdr));
@@ -658,7 +665,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
skb->transport_header = skb->network_header;
}
- if (mroute_socket == NULL) {
+ if (net->ipv4.mroute_sk == NULL) {
kfree_skb(skb);
return -EINVAL;
}
@@ -666,7 +673,8 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
/*
* Deliver to mrouted
*/
- if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) {
+ ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
+ if (ret < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
kfree_skb(skb);
@@ -680,7 +688,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
*/
static int
-ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
+ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
{
int err;
struct mfc_cache *c;
@@ -688,7 +696,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
spin_lock_bh(&mfc_unres_lock);
for (c=mfc_unres_queue; c; c=c->next) {
- if (c->mfc_mcastgrp == iph->daddr &&
+ if (net_eq(mfc_net(c), net) &&
+ c->mfc_mcastgrp == iph->daddr &&
c->mfc_origin == iph->saddr)
break;
}
@@ -698,8 +707,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
* Create a new entry if allowable
*/
- if (atomic_read(&cache_resolve_queue_len) >= 10 ||
- (c=ipmr_cache_alloc_unres())==NULL) {
+ if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
+ (c = ipmr_cache_alloc_unres(net)) == NULL) {
spin_unlock_bh(&mfc_unres_lock);
kfree_skb(skb);
@@ -716,18 +725,19 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
/*
* Reflect first query at mrouted.
*/
- if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
+ err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
+ if (err < 0) {
/* If the report failed throw the cache entry
out - Brad Parker
*/
spin_unlock_bh(&mfc_unres_lock);
- kmem_cache_free(mrt_cachep, c);
+ ipmr_cache_free(c);
kfree_skb(skb);
return err;
}
- atomic_inc(&cache_resolve_queue_len);
+ atomic_inc(&net->ipv4.cache_resolve_queue_len);
c->next = mfc_unres_queue;
mfc_unres_queue = c;
@@ -753,35 +763,37 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
* MFC cache manipulation by user space mroute daemon
*/
-static int ipmr_mfc_delete(struct mfcctl *mfc)
+static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
{
int line;
struct mfc_cache *c, **cp;
line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
- for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
+ for (cp = &net->ipv4.mfc_cache_array[line];
+ (c = *cp) != NULL; cp = &c->next) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
write_lock_bh(&mrt_lock);
*cp = c->next;
write_unlock_bh(&mrt_lock);
- kmem_cache_free(mrt_cachep, c);
+ ipmr_cache_free(c);
return 0;
}
}
return -ENOENT;
}
-static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
+static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
{
int line;
struct mfc_cache *uc, *c, **cp;
line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
- for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
+ for (cp = &net->ipv4.mfc_cache_array[line];
+ (c = *cp) != NULL; cp = &c->next) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
break;
@@ -800,7 +812,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
return -EINVAL;
- c = ipmr_cache_alloc();
+ c = ipmr_cache_alloc(net);
if (c == NULL)
return -ENOMEM;
@@ -812,8 +824,8 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
c->mfc_flags |= MFC_STATIC;
write_lock_bh(&mrt_lock);
- c->next = mfc_cache_array[line];
- mfc_cache_array[line] = c;
+ c->next = net->ipv4.mfc_cache_array[line];
+ net->ipv4.mfc_cache_array[line] = c;
write_unlock_bh(&mrt_lock);
/*
@@ -823,19 +835,21 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
spin_lock_bh(&mfc_unres_lock);
for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
cp = &uc->next) {
- if (uc->mfc_origin == c->mfc_origin &&
+ if (net_eq(mfc_net(uc), net) &&
+ uc->mfc_origin == c->mfc_origin &&
uc->mfc_mcastgrp == c->mfc_mcastgrp) {
*cp = uc->next;
- if (atomic_dec_and_test(&cache_resolve_queue_len))
- del_timer(&ipmr_expire_timer);
+ atomic_dec(&net->ipv4.cache_resolve_queue_len);
break;
}
}
+ if (mfc_unres_queue == NULL)
+ del_timer(&ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
if (uc) {
ipmr_cache_resolve(uc, c);
- kmem_cache_free(mrt_cachep, uc);
+ ipmr_cache_free(uc);
}
return 0;
}
@@ -844,16 +858,16 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct sock *sk)
+static void mroute_clean_tables(struct net *net)
{
int i;
/*
* Shut down all active vif entries
*/
- for (i=0; i<maxvif; i++) {
- if (!(vif_table[i].flags&VIFF_STATIC))
- vif_delete(i, 0);
+ for (i = 0; i < net->ipv4.maxvif; i++) {
+ if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
+ vif_delete(net, i, 0);
}
/*
@@ -862,7 +876,7 @@ static void mroute_clean_tables(struct sock *sk)
for (i=0; i<MFC_LINES; i++) {
struct mfc_cache *c, **cp;
- cp = &mfc_cache_array[i];
+ cp = &net->ipv4.mfc_cache_array[i];
while ((c = *cp) != NULL) {
if (c->mfc_flags&MFC_STATIC) {
cp = &c->next;
@@ -872,22 +886,23 @@ static void mroute_clean_tables(struct sock *sk)
*cp = c->next;
write_unlock_bh(&mrt_lock);
- kmem_cache_free(mrt_cachep, c);
+ ipmr_cache_free(c);
}
}
- if (atomic_read(&cache_resolve_queue_len) != 0) {
- struct mfc_cache *c;
+ if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
+ struct mfc_cache *c, **cp;
spin_lock_bh(&mfc_unres_lock);
- while (mfc_unres_queue != NULL) {
- c = mfc_unres_queue;
- mfc_unres_queue = c->next;
- spin_unlock_bh(&mfc_unres_lock);
+ cp = &mfc_unres_queue;
+ while ((c = *cp) != NULL) {
+ if (!net_eq(mfc_net(c), net)) {
+ cp = &c->next;
+ continue;
+ }
+ *cp = c->next;
ipmr_destroy_unres(c);
-
- spin_lock_bh(&mfc_unres_lock);
}
spin_unlock_bh(&mfc_unres_lock);
}
@@ -895,15 +910,17 @@ static void mroute_clean_tables(struct sock *sk)
static void mrtsock_destruct(struct sock *sk)
{
+ struct net *net = sock_net(sk);
+
rtnl_lock();
- if (sk == mroute_socket) {
- IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
+ if (sk == net->ipv4.mroute_sk) {
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
write_lock_bh(&mrt_lock);
- mroute_socket = NULL;
+ net->ipv4.mroute_sk = NULL;
write_unlock_bh(&mrt_lock);
- mroute_clean_tables(sk);
+ mroute_clean_tables(net);
}
rtnl_unlock();
}
@@ -920,9 +937,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
int ret;
struct vifctl vif;
struct mfcctl mfc;
+ struct net *net = sock_net(sk);
if (optname != MRT_INIT) {
- if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
+ if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -935,7 +953,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
return -ENOPROTOOPT;
rtnl_lock();
- if (mroute_socket) {
+ if (net->ipv4.mroute_sk) {
rtnl_unlock();
return -EADDRINUSE;
}
@@ -943,15 +961,15 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
write_lock_bh(&mrt_lock);
- mroute_socket = sk;
+ net->ipv4.mroute_sk = sk;
write_unlock_bh(&mrt_lock);
- IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
}
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != mroute_socket)
+ if (sk != net->ipv4.mroute_sk)
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
@@ -964,9 +982,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
return -ENFILE;
rtnl_lock();
if (optname == MRT_ADD_VIF) {
- ret = vif_add(&vif, sk==mroute_socket);
+ ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
} else {
- ret = vif_delete(vif.vifc_vifi, 0);
+ ret = vif_delete(net, vif.vifc_vifi, 0);
}
rtnl_unlock();
return ret;
@@ -983,9 +1001,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
return -EFAULT;
rtnl_lock();
if (optname == MRT_DEL_MFC)
- ret = ipmr_mfc_delete(&mfc);
+ ret = ipmr_mfc_delete(net, &mfc);
else
- ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
+ ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
rtnl_unlock();
return ret;
/*
@@ -996,7 +1014,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
int v;
if (get_user(v,(int __user *)optval))
return -EFAULT;
- mroute_do_assert=(v)?1:0;
+ net->ipv4.mroute_do_assert = (v) ? 1 : 0;
return 0;
}
#ifdef CONFIG_IP_PIMSM
@@ -1010,11 +1028,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
rtnl_lock();
ret = 0;
- if (v != mroute_do_pim) {
- mroute_do_pim = v;
- mroute_do_assert = v;
+ if (v != net->ipv4.mroute_do_pim) {
+ net->ipv4.mroute_do_pim = v;
+ net->ipv4.mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
- if (mroute_do_pim)
+ if (net->ipv4.mroute_do_pim)
ret = inet_add_protocol(&pim_protocol,
IPPROTO_PIM);
else
@@ -1045,6 +1063,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
{
int olr;
int val;
+ struct net *net = sock_net(sk);
if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
@@ -1066,10 +1085,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
val = 0x0305;
#ifdef CONFIG_IP_PIMSM
else if (optname == MRT_PIM)
- val = mroute_do_pim;
+ val = net->ipv4.mroute_do_pim;
#endif
else
- val = mroute_do_assert;
+ val = net->ipv4.mroute_do_assert;
if (copy_to_user(optval, &val, olr))
return -EFAULT;
return 0;
@@ -1085,16 +1104,17 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
struct sioc_vif_req vr;
struct vif_device *vif;
struct mfc_cache *c;
+ struct net *net = sock_net(sk);
switch (cmd) {
case SIOCGETVIFCNT:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
- if (vr.vifi >= maxvif)
+ if (vr.vifi >= net->ipv4.maxvif)
return -EINVAL;
read_lock(&mrt_lock);
- vif=&vif_table[vr.vifi];
- if (VIF_EXISTS(vr.vifi)) {
+ vif = &net->ipv4.vif_table[vr.vifi];
+ if (VIF_EXISTS(net, vr.vifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
@@ -1112,7 +1132,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
read_lock(&mrt_lock);
- c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
+ c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
@@ -1134,18 +1154,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
+ struct net *net = dev_net(dev);
struct vif_device *v;
int ct;
- if (!net_eq(dev_net(dev), &init_net))
+ if (!net_eq(dev_net(dev), net))
return NOTIFY_DONE;
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- v=&vif_table[0];
- for (ct=0; ct<maxvif; ct++,v++) {
+ v = &net->ipv4.vif_table[0];
+ for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
if (v->dev == dev)
- vif_delete(ct, 1);
+ vif_delete(net, ct, 1);
}
return NOTIFY_DONE;
}
@@ -1205,8 +1226,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
+ struct net *net = mfc_net(c);
const struct iphdr *iph = ip_hdr(skb);
- struct vif_device *vif = &vif_table[vifi];
+ struct vif_device *vif = &net->ipv4.vif_table[vifi];
struct net_device *dev;
struct rtable *rt;
int encap = 0;
@@ -1220,9 +1242,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
vif->bytes_out += skb->len;
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
- kfree_skb(skb);
- return;
+ ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
+ goto out_free;
}
#endif
@@ -1233,7 +1254,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
.saddr = vif->local,
.tos = RT_TOS(iph->tos) } },
.proto = IPPROTO_IPIP };
- if (ip_route_output_key(&init_net, &rt, &fl))
+ if (ip_route_output_key(net, &rt, &fl))
goto out_free;
encap = sizeof(struct iphdr);
} else {
@@ -1242,7 +1263,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{ .daddr = iph->daddr,
.tos = RT_TOS(iph->tos) } },
.proto = IPPROTO_IPIP };
- if (ip_route_output_key(&init_net, &rt, &fl))
+ if (ip_route_output_key(net, &rt, &fl))
goto out_free;
}
@@ -1306,9 +1327,10 @@ out_free:
static int ipmr_find_vif(struct net_device *dev)
{
+ struct net *net = dev_net(dev);
int ct;
- for (ct=maxvif-1; ct>=0; ct--) {
- if (vif_table[ct].dev == dev)
+ for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
+ if (net->ipv4.vif_table[ct].dev == dev)
break;
}
return ct;
@@ -1320,6 +1342,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
{
int psend = -1;
int vif, ct;
+ struct net *net = mfc_net(cache);
vif = cache->mfc_parent;
cache->mfc_un.res.pkt++;
@@ -1328,7 +1351,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
- if (vif_table[vif].dev != skb->dev) {
+ if (net->ipv4.vif_table[vif].dev != skb->dev) {
int true_vifi;
if (skb->rtable->fl.iif == 0) {
@@ -1349,23 +1372,24 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
cache->mfc_un.res.wrong_if++;
true_vifi = ipmr_find_vif(skb->dev);
- if (true_vifi >= 0 && mroute_do_assert &&
+ if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
so that we cannot check that packet arrived on an oif.
It is bad, but otherwise we would need to move pretty
large chunk of pimd to kernel. Ough... --ANK
*/
- (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
+ (net->ipv4.mroute_do_pim ||
+ cache->mfc_un.res.ttls[true_vifi] < 255) &&
time_after(jiffies,
cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
cache->mfc_un.res.last_assert = jiffies;
- ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
+ ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
}
goto dont_forward;
}
- vif_table[vif].pkt_in++;
- vif_table[vif].bytes_in += skb->len;
+ net->ipv4.vif_table[vif].pkt_in++;
+ net->ipv4.vif_table[vif].bytes_in += skb->len;
/*
* Forward the frame
@@ -1405,6 +1429,7 @@ dont_forward:
int ip_mr_input(struct sk_buff *skb)
{
struct mfc_cache *cache;
+ struct net *net = dev_net(skb->dev);
int local = skb->rtable->rt_flags&RTCF_LOCAL;
/* Packet is looped back after forward, it should not be
@@ -1425,9 +1450,9 @@ int ip_mr_input(struct sk_buff *skb)
that we can forward NO IGMP messages.
*/
read_lock(&mrt_lock);
- if (mroute_socket) {
+ if (net->ipv4.mroute_sk) {
nf_reset(skb);
- raw_rcv(mroute_socket, skb);
+ raw_rcv(net->ipv4.mroute_sk, skb);
read_unlock(&mrt_lock);
return 0;
}
@@ -1436,7 +1461,7 @@ int ip_mr_input(struct sk_buff *skb)
}
read_lock(&mrt_lock);
- cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+ cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
/*
* No usable cache entry
@@ -1456,7 +1481,7 @@ int ip_mr_input(struct sk_buff *skb)
vif = ipmr_find_vif(skb->dev);
if (vif >= 0) {
- int err = ipmr_cache_unresolved(vif, skb);
+ int err = ipmr_cache_unresolved(net, vif, skb);
read_unlock(&mrt_lock);
return err;
@@ -1487,6 +1512,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
{
struct net_device *reg_dev = NULL;
struct iphdr *encap;
+ struct net *net = dev_net(skb->dev);
encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
/*
@@ -1501,8 +1527,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
return 1;
read_lock(&mrt_lock);
- if (reg_vif_num >= 0)
- reg_dev = vif_table[reg_vif_num].dev;
+ if (net->ipv4.mroute_reg_vif_num >= 0)
+ reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
if (reg_dev)
dev_hold(reg_dev);
read_unlock(&mrt_lock);
@@ -1537,13 +1563,14 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
int pim_rcv_v1(struct sk_buff * skb)
{
struct igmphdr *pim;
+ struct net *net = dev_net(skb->dev);
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
pim = igmp_hdr(skb);
- if (!mroute_do_pim ||
+ if (!net->ipv4.mroute_do_pim ||
pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
goto drop;
@@ -1583,7 +1610,8 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
{
int ct;
struct rtnexthop *nhp;
- struct net_device *dev = vif_table[c->mfc_parent].dev;
+ struct net *net = mfc_net(c);
+ struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
u8 *b = skb_tail_pointer(skb);
struct rtattr *mp_head;
@@ -1599,7 +1627,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
- nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
+ nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
@@ -1613,14 +1641,15 @@ rtattr_failure:
return -EMSGSIZE;
}
-int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ipmr_get_route(struct net *net,
+ struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
int err;
struct mfc_cache *cache;
struct rtable *rt = skb->rtable;
read_lock(&mrt_lock);
- cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
+ cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
if (cache == NULL) {
struct sk_buff *skb2;
@@ -1651,7 +1680,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
iph->version = 0;
- err = ipmr_cache_unresolved(vif, skb2);
+ err = ipmr_cache_unresolved(net, vif, skb2);
read_unlock(&mrt_lock);
return err;
}
@@ -1668,17 +1697,19 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
* The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
*/
struct ipmr_vif_iter {
+ struct seq_net_private p;
int ct;
};
-static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
+static struct vif_device *ipmr_vif_seq_idx(struct net *net,
+ struct ipmr_vif_iter *iter,
loff_t pos)
{
- for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
- if (!VIF_EXISTS(iter->ct))
+ for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
+ if (!VIF_EXISTS(net, iter->ct))
continue;
if (pos-- == 0)
- return &vif_table[iter->ct];
+ return &net->ipv4.vif_table[iter->ct];
}
return NULL;
}
@@ -1686,23 +1717,26 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(mrt_lock)
{
+ struct net *net = seq_file_net(seq);
+
read_lock(&mrt_lock);
- return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
+ return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ipmr_vif_iter *iter = seq->private;
+ struct net *net = seq_file_net(seq);
++*pos;
if (v == SEQ_START_TOKEN)
- return ipmr_vif_seq_idx(iter, 0);
+ return ipmr_vif_seq_idx(net, iter, 0);
- while (++iter->ct < maxvif) {
- if (!VIF_EXISTS(iter->ct))
+ while (++iter->ct < net->ipv4.maxvif) {
+ if (!VIF_EXISTS(net, iter->ct))
continue;
- return &vif_table[iter->ct];
+ return &net->ipv4.vif_table[iter->ct];
}
return NULL;
}
@@ -1715,6 +1749,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
+ struct net *net = seq_file_net(seq);
+
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
@@ -1724,7 +1760,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
- vif - vif_table,
+ vif - net->ipv4.vif_table,
name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
vif->flags, vif->local, vif->remote);
@@ -1741,8 +1777,8 @@ static const struct seq_operations ipmr_vif_seq_ops = {
static int ipmr_vif_open(struct inode *inode, struct file *file)
{
- return seq_open_private(file, &ipmr_vif_seq_ops,
- sizeof(struct ipmr_vif_iter));
+ return seq_open_net(inode, file, &ipmr_vif_seq_ops,
+ sizeof(struct ipmr_vif_iter));
}
static const struct file_operations ipmr_vif_fops = {
@@ -1750,23 +1786,26 @@ static const struct file_operations ipmr_vif_fops = {
.open = ipmr_vif_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release_net,
};
struct ipmr_mfc_iter {
+ struct seq_net_private p;
struct mfc_cache **cache;
int ct;
};
-static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
+static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
+ struct ipmr_mfc_iter *it, loff_t pos)
{
struct mfc_cache *mfc;
- it->cache = mfc_cache_array;
+ it->cache = net->ipv4.mfc_cache_array;
read_lock(&mrt_lock);
for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
- for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
+ for (mfc = net->ipv4.mfc_cache_array[it->ct];
+ mfc; mfc = mfc->next)
if (pos-- == 0)
return mfc;
read_unlock(&mrt_lock);
@@ -1774,7 +1813,8 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
it->cache = &mfc_unres_queue;
spin_lock_bh(&mfc_unres_lock);
for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
- if (pos-- == 0)
+ if (net_eq(mfc_net(mfc), net) &&
+ pos-- == 0)
return mfc;
spin_unlock_bh(&mfc_unres_lock);
@@ -1786,9 +1826,11 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ipmr_mfc_iter *it = seq->private;
+ struct net *net = seq_file_net(seq);
+
it->cache = NULL;
it->ct = 0;
- return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
+ return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
@@ -1796,11 +1838,12 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct mfc_cache *mfc = v;
struct ipmr_mfc_iter *it = seq->private;
+ struct net *net = seq_file_net(seq);
++*pos;
if (v == SEQ_START_TOKEN)
- return ipmr_mfc_seq_idx(seq->private, 0);
+ return ipmr_mfc_seq_idx(net, seq->private, 0);
if (mfc->next)
return mfc->next;
@@ -1808,10 +1851,10 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (it->cache == &mfc_unres_queue)
goto end_of_list;
- BUG_ON(it->cache != mfc_cache_array);
+ BUG_ON(it->cache != net->ipv4.mfc_cache_array);
while (++it->ct < MFC_LINES) {
- mfc = mfc_cache_array[it->ct];
+ mfc = net->ipv4.mfc_cache_array[it->ct];
if (mfc)
return mfc;
}
@@ -1823,6 +1866,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
spin_lock_bh(&mfc_unres_lock);
mfc = mfc_unres_queue;
+ while (mfc && !net_eq(mfc_net(mfc), net))
+ mfc = mfc->next;
if (mfc)
return mfc;
@@ -1836,16 +1881,18 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
struct ipmr_mfc_iter *it = seq->private;
+ struct net *net = seq_file_net(seq);
if (it->cache == &mfc_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == mfc_cache_array)
+ else if (it->cache == net->ipv4.mfc_cache_array)
read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
int n;
+ struct net *net = seq_file_net(seq);
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -1866,9 +1913,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
n < mfc->mfc_un.res.maxvif; n++ ) {
- if (VIF_EXISTS(n)
- && mfc->mfc_un.res.ttls[n] < 255)
- seq_printf(seq,
+ if (VIF_EXISTS(net, n) &&
+ mfc->mfc_un.res.ttls[n] < 255)
+ seq_printf(seq,
" %2d:%-3d",
n, mfc->mfc_un.res.ttls[n]);
}
@@ -1892,8 +1939,8 @@ static const struct seq_operations ipmr_mfc_seq_ops = {
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
- return seq_open_private(file, &ipmr_mfc_seq_ops,
- sizeof(struct ipmr_mfc_iter));
+ return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
+ sizeof(struct ipmr_mfc_iter));
}
static const struct file_operations ipmr_mfc_fops = {
@@ -1901,7 +1948,7 @@ static const struct file_operations ipmr_mfc_fops = {
.open = ipmr_mfc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release_net,
};
#endif
@@ -1915,6 +1962,65 @@ static struct net_protocol pim_protocol = {
/*
* Setup for IP multicast routing
*/
+static int __net_init ipmr_net_init(struct net *net)
+{
+ int err = 0;
+
+ net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
+ GFP_KERNEL);
+ if (!net->ipv4.vif_table) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Forwarding cache */
+ net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
+ sizeof(struct mfc_cache *),
+ GFP_KERNEL);
+ if (!net->ipv4.mfc_cache_array) {
+ err = -ENOMEM;
+ goto fail_mfc_cache;
+ }
+
+#ifdef CONFIG_IP_PIMSM
+ net->ipv4.mroute_reg_vif_num = -1;
+#endif
+
+#ifdef CONFIG_PROC_FS
+ err = -ENOMEM;
+ if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
+ goto proc_vif_fail;
+ if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
+ goto proc_cache_fail;
+#endif
+ return 0;
+
+#ifdef CONFIG_PROC_FS
+proc_cache_fail:
+ proc_net_remove(net, "ip_mr_vif");
+proc_vif_fail:
+ kfree(net->ipv4.mfc_cache_array);
+#endif
+fail_mfc_cache:
+ kfree(net->ipv4.vif_table);
+fail:
+ return err;
+}
+
+static void __net_exit ipmr_net_exit(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+ proc_net_remove(net, "ip_mr_cache");
+ proc_net_remove(net, "ip_mr_vif");
+#endif
+ kfree(net->ipv4.mfc_cache_array);
+ kfree(net->ipv4.vif_table);
+}
+
+static struct pernet_operations ipmr_net_ops = {
+ .init = ipmr_net_init,
+ .exit = ipmr_net_exit,
+};
int __init ip_mr_init(void)
{
@@ -1927,26 +2033,20 @@ int __init ip_mr_init(void)
if (!mrt_cachep)
return -ENOMEM;
+ err = register_pernet_subsys(&ipmr_net_ops);
+ if (err)
+ goto reg_pernet_fail;
+
setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
err = register_netdevice_notifier(&ip_mr_notifier);
if (err)
goto reg_notif_fail;
-#ifdef CONFIG_PROC_FS
- err = -ENOMEM;
- if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
- goto proc_vif_fail;
- if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
- goto proc_cache_fail;
-#endif
return 0;
-#ifdef CONFIG_PROC_FS
-proc_cache_fail:
- proc_net_remove(&init_net, "ip_mr_vif");
-proc_vif_fail:
- unregister_netdevice_notifier(&ip_mr_notifier);
-#endif
+
reg_notif_fail:
del_timer(&ipmr_expire_timer);
+ unregister_pernet_subsys(&ipmr_net_ops);
+reg_pernet_fail:
kmem_cache_destroy(mrt_cachep);
return err;
}
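
Note on the init path above: the failure labels unwind in strict reverse order, so an error at the notifier step tears down only the pernet registration that already succeeded. A minimal user-space sketch of this goto-unwind idiom; step_a/step_b/undo_a are hypothetical stand-ins, not kernel APIs:

#include <stdio.h>

static int step_a(void)  { return 0; }   /* e.g. register_pernet_subsys() */
static int step_b(void)  { return -1; }  /* e.g. register_netdevice_notifier() */
static void undo_a(void) { puts("undo a"); }

static int demo_init(void)
{
        int err;

        err = step_a();
        if (err)
                goto fail_a;
        err = step_b();
        if (err)
                goto fail_b;
        return 0;

fail_b:
        undo_a();       /* unwind only what succeeded, in reverse order */
fail_a:
        return err;
}

int main(void)
{
        printf("init: %d\n", demo_init());
        return 0;
}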
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 3816e1dc9295..1833bdbf9805 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -31,7 +31,7 @@ config NF_CONNTRACK_PROC_COMPAT
default y
help
This option enables /proc and sysctl compatibility with the old
- layer 3 dependant connection tracking. This is needed to keep
+ layer 3 dependent connection tracking. This is needed to keep
old programs that have not been adapted to the new names working.
If unsure, say Y.
@@ -95,11 +95,11 @@ config IP_NF_MATCH_ECN
config IP_NF_MATCH_TTL
tristate '"ttl" match support'
depends on NETFILTER_ADVANCED
- help
- This adds CONFIG_IP_NF_MATCH_TTL option, which enabled the user
- to match packets by their TTL value.
-
- To compile it as a module, choose M here. If unsure, say N.
+ select NETFILTER_XT_MATCH_HL
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_MATCH_HL.
# `filter', generic and specific targets
config IP_NF_FILTER
@@ -323,19 +323,13 @@ config IP_NF_TARGET_ECN
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_TTL
- tristate 'TTL target support'
- depends on IP_NF_MANGLE
+ tristate '"TTL" target support'
depends on NETFILTER_ADVANCED
- help
- This option adds a `TTL' target, which enables the user to modify
- the TTL value of the IP header.
-
- While it is safe to decrement/lower the TTL, this target also enables
- functionality to increment and set the TTL value of the IP header to
- arbitrary values. This is EXTREMELY DANGEROUS since you can easily
- create immortal packets that loop forever on the network.
-
- To compile it as a module, choose M here. If unsure, say N.
+ select NETFILTER_XT_TARGET_HL
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_TARGET_HL.
# raw + specific targets
config IP_NF_RAW
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 5f9b650d90fc..48111594ee9b 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
-obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
@@ -61,7 +60,6 @@ obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
-obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
# generic ARP tables
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 7ea88b61cb0d..35c5f6a5cb7c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -73,6 +73,28 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
return (ret != 0);
}
+/*
+ * Unfortunately, _b and _mask are not aligned to an int (or long int).
+ * Some arches don't care; unrolling the loop is a win on them.
+ * For other arches, we only have a 16-bit alignment.
+ */
+static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
+#else
+ unsigned long ret = 0;
+ const u16 *a = (const u16 *)_a;
+ const u16 *b = (const u16 *)_b;
+ const u16 *mask = (const u16 *)_mask;
+ int i;
+
+ for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
+ ret |= (a[i] ^ b[i]) & mask[i];
+#endif
+ return ret;
+}
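+
A user-space sketch of the masked 16-bit compare introduced above: the mask bytes are 0xff over the significant part of the name (including the trailing NUL), and a nonzero result means some masked position differs. Like the kernel helper, it assumes the buffers are at least 16-bit aligned:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define IFNAMSIZ 16

static unsigned long cmp16(const char *a, const char *b, const char *mask)
{
        const uint16_t *pa = (const uint16_t *)a;   /* 16-bit alignment assumed */
        const uint16_t *pb = (const uint16_t *)b;
        const uint16_t *pm = (const uint16_t *)mask;
        unsigned long ret = 0;
        size_t i;

        for (i = 0; i < IFNAMSIZ / sizeof(uint16_t); i++)
                ret |= (pa[i] ^ pb[i]) & pm[i];
        return ret;             /* zero iff equal under the mask */
}

int main(void)
{
        char dev[IFNAMSIZ] = "eth0", rule[IFNAMSIZ] = "eth0";
        char mask[IFNAMSIZ];

        memset(mask, 0xff, 5);                  /* match "eth0\0" exactly */
        memset(mask + 5, 0, IFNAMSIZ - 5);
        printf("%s\n", cmp16(dev, rule, mask) ? "mismatch" : "match");
        return 0;
}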
+
/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
struct net_device *dev,
@@ -83,7 +105,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
const char *arpptr = (char *)(arphdr + 1);
const char *src_devaddr, *tgt_devaddr;
__be32 src_ipaddr, tgt_ipaddr;
- int i, ret;
+ long ret;
#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
@@ -156,10 +178,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
}
/* Look for ifname matches. */
- for (i = 0, ret = 0; i < IFNAMSIZ; i++) {
- ret |= (indev[i] ^ arpinfo->iniface[i])
- & arpinfo->iniface_mask[i];
- }
+ ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -168,10 +187,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
return 0;
}
- for (i = 0, ret = 0; i < IFNAMSIZ; i++) {
- ret |= (outdev[i] ^ arpinfo->outiface[i])
- & arpinfo->outiface_mask[i];
- }
+ ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -221,7 +237,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
const struct net_device *out,
struct xt_table *table)
{
- static const char nulldevname[IFNAMSIZ];
+ static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
unsigned int verdict = NF_DROP;
const struct arphdr *arp;
bool hotdrop = false;
@@ -237,9 +253,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
indev = in ? in->name : nulldevname;
outdev = out ? out->name : nulldevname;
- read_lock_bh(&table->lock);
- private = table->private;
- table_base = (void *)private->entries[smp_processor_id()];
+ rcu_read_lock();
+ private = rcu_dereference(table->private);
+ table_base = rcu_dereference(private->entries[smp_processor_id()]);
+
e = get_entry(table_base, private->hook_entry[hook]);
back = get_entry(table_base, private->underflow[hook]);
@@ -311,7 +328,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
e = (void *)e + e->next_offset;
}
} while (!hotdrop);
- read_unlock_bh(&table->lock);
+
+ rcu_read_unlock();
if (hotdrop)
return NF_DROP;
@@ -374,7 +392,9 @@ static int mark_source_chains(struct xt_table_info *newinfo,
&& unconditional(&e->arp)) || visited) {
unsigned int oldpos, size;
- if (t->verdict < -NF_MAX_VERDICT - 1) {
+ if ((strcmp(t->target.u.user.name,
+ ARPT_STANDARD_TARGET) == 0) &&
+ t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
@@ -714,11 +734,65 @@ static void get_counters(const struct xt_table_info *t,
}
}
-static inline struct xt_counters *alloc_counters(struct xt_table *table)
+
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct arpt_entry *e,
+ const struct xt_counters addme[],
+ unsigned int *i)
+{
+ ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+ (*i)++;
+ return 0;
+}
+
+/* Take values from counters and add them back onto the current cpu */
+static void put_counters(struct xt_table_info *t,
+ const struct xt_counters counters[])
+{
+ unsigned int i, cpu;
+
+ local_bh_disable();
+ cpu = smp_processor_id();
+ i = 0;
+ ARPT_ENTRY_ITERATE(t->entries[cpu],
+ t->size,
+ add_counter_to_entry,
+ counters,
+ &i);
+ local_bh_enable();
+}
+
+static inline int
+zero_entry_counter(struct arpt_entry *e, void *arg)
+{
+ e->counters.bcnt = 0;
+ e->counters.pcnt = 0;
+ return 0;
+}
+
+static void
+clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
+{
+ unsigned int cpu;
+ const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
+
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ for_each_possible_cpu(cpu) {
+ memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
+ ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
+ zero_entry_counter, NULL);
+ }
+}
+
+static struct xt_counters *alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ struct xt_table_info *private = table->private;
+ struct xt_table_info *info;
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
@@ -728,14 +802,30 @@ static inline struct xt_counters *alloc_counters(struct xt_table *table)
counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
- return ERR_PTR(-ENOMEM);
+ goto nomem;
+
+ info = xt_alloc_table_info(private->size);
+ if (!info)
+ goto free_counters;
+
+ clone_counters(info, private);
+
+ mutex_lock(&table->lock);
+ xt_table_entry_swap_rcu(private, info);
+ synchronize_net(); /* Wait until smoke has cleared */
+
+ get_counters(info, counters);
+ put_counters(private, counters);
+ mutex_unlock(&table->lock);
- /* First, sum counters... */
- write_lock_bh(&table->lock);
- get_counters(private, counters);
- write_unlock_bh(&table->lock);
+ xt_free_table_info(info);
return counters;
+
+ free_counters:
+ vfree(counters);
+ nomem:
+ return ERR_PTR(-ENOMEM);
}
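
The sequence above snapshots counters without a per-packet lock: a zeroed clone is swapped in under the table mutex, synchronize_net() lets in-flight RCU readers drain, the retired copy is summed, and the sums are folded back onto the current CPU. A single-threaded toy sketch of the swap-then-sum idea (the real synchronization is RCU, elided here):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NCOUNTERS 4

static _Atomic(unsigned long *) live;

static void hit(int i)                  /* fast path: bump the live array */
{
        atomic_load(&live)[i]++;
}

static unsigned long snapshot_sum(void)
{
        unsigned long *fresh = calloc(NCOUNTERS, sizeof(*fresh));
        unsigned long *old, sum = 0;
        int i;

        old = atomic_exchange(&live, fresh);    /* new hits land in 'fresh' */
        /* the kernel waits here (synchronize_net()) for readers to drain */
        for (i = 0; i < NCOUNTERS; i++) {
                sum += old[i];
                atomic_load(&live)[i] += old[i]; /* put_counters() analogue */
        }
        free(old);
        return sum;
}

int main(void)
{
        live = calloc(NCOUNTERS, sizeof(unsigned long));
        hit(1); hit(2); hit(2);
        printf("snapshot: %lu\n", snapshot_sum());
        return 0;
}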
static int copy_entries_to_user(unsigned int total_size,
@@ -1075,20 +1165,6 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK.
- */
-static inline int add_counter_to_entry(struct arpt_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
-
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
-
static int do_add_counters(struct net *net, void __user *user, unsigned int len,
int compat)
{
@@ -1148,13 +1224,14 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
goto free;
}
- write_lock_bh(&t->lock);
+ mutex_lock(&t->lock);
private = t->private;
if (private->number != num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
+ preempt_disable();
i = 0;
/* Choose the copy that is on our node */
loc_cpu_entry = private->entries[smp_processor_id()];
@@ -1163,8 +1240,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
add_counter_to_entry,
paddc,
&i);
+ preempt_enable();
unlock_up_free:
- write_unlock_bh(&t->lock);
+ mutex_unlock(&t->lock);
+
xt_table_unlock(t);
module_put(t->me);
free:
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index e091187e864f..6ecfdae7c589 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -48,8 +48,6 @@ static struct
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
- .private = NULL,
.me = THIS_MODULE,
.af = NFPROTO_ARP,
};
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 432ce9d1c11c..5f22c91c6e15 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -24,6 +24,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
+#include <linux/net.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -640,6 +641,7 @@ static void __exit ip_queue_fini(void)
MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);
module_init(ip_queue_init);
module_exit(ip_queue_fini);
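
The new alias lets the module load on demand: creating a NETLINK_FIREWALL socket makes the kernel request a module named after the protocol pair. Assuming the usual expansion of MODULE_ALIAS_NET_PF_PROTO, the requested string can be reproduced like this (PF_NETLINK is 16, NETLINK_FIREWALL is 3):

#include <stdio.h>

#define PF_NETLINK       16
#define NETLINK_FIREWALL  3

int main(void)
{
        /* the alias string the modprobe request resolves against */
        printf("net-pf-%d-proto-%d\n", PF_NETLINK, NETLINK_FIREWALL);
        return 0;
}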
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ef8b6ca068b2..82ee7c9049ff 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -83,7 +83,6 @@ ip_packet_match(const struct iphdr *ip,
const struct ipt_ip *ipinfo,
int isfrag)
{
- size_t i;
unsigned long ret;
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
@@ -103,12 +102,7 @@ ip_packet_match(const struct iphdr *ip,
return false;
}
- /* Look for ifname matches; this should unroll nicely. */
- for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
- ret |= (((const unsigned long *)indev)[i]
- ^ ((const unsigned long *)ipinfo->iniface)[i])
- & ((const unsigned long *)ipinfo->iniface_mask)[i];
- }
+ ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -117,11 +111,7 @@ ip_packet_match(const struct iphdr *ip,
return false;
}
- for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
- ret |= (((const unsigned long *)outdev)[i]
- ^ ((const unsigned long *)ipinfo->outiface)[i])
- & ((const unsigned long *)ipinfo->outiface_mask)[i];
- }
+ ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -347,10 +337,12 @@ ipt_do_table(struct sk_buff *skb,
mtpar.family = tgpar.family = NFPROTO_IPV4;
tgpar.hooknum = hook;
- read_lock_bh(&table->lock);
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- private = table->private;
- table_base = (void *)private->entries[smp_processor_id()];
+
+ rcu_read_lock();
+ private = rcu_dereference(table->private);
+ table_base = rcu_dereference(private->entries[smp_processor_id()]);
+
e = get_entry(table_base, private->hook_entry[hook]);
/* For return from builtin chain */
@@ -445,7 +437,7 @@ ipt_do_table(struct sk_buff *skb,
}
} while (!hotdrop);
- read_unlock_bh(&table->lock);
+ rcu_read_unlock();
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
@@ -496,7 +488,9 @@ mark_source_chains(struct xt_table_info *newinfo,
&& unconditional(&e->ip)) || visited) {
unsigned int oldpos, size;
- if (t->verdict < -NF_MAX_VERDICT - 1) {
+ if ((strcmp(t->target.u.user.name,
+ IPT_STANDARD_TARGET) == 0) &&
+ t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
@@ -924,13 +918,68 @@ get_counters(const struct xt_table_info *t,
counters,
&i);
}
+
+}
+
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct ipt_entry *e,
+ const struct xt_counters addme[],
+ unsigned int *i)
+{
+ ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+ (*i)++;
+ return 0;
+}
+
+/* Take values from counters and add them back onto the current cpu */
+static void put_counters(struct xt_table_info *t,
+ const struct xt_counters counters[])
+{
+ unsigned int i, cpu;
+
+ local_bh_disable();
+ cpu = smp_processor_id();
+ i = 0;
+ IPT_ENTRY_ITERATE(t->entries[cpu],
+ t->size,
+ add_counter_to_entry,
+ counters,
+ &i);
+ local_bh_enable();
+}
+
+
+static inline int
+zero_entry_counter(struct ipt_entry *e, void *arg)
+{
+ e->counters.bcnt = 0;
+ e->counters.pcnt = 0;
+ return 0;
+}
+
+static void
+clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
+{
+ unsigned int cpu;
+ const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
+
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ for_each_possible_cpu(cpu) {
+ memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
+ IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
+ zero_entry_counter, NULL);
+ }
}
static struct xt_counters * alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ struct xt_table_info *private = table->private;
+ struct xt_table_info *info;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -939,14 +988,30 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
- return ERR_PTR(-ENOMEM);
+ goto nomem;
+
+ info = xt_alloc_table_info(private->size);
+ if (!info)
+ goto free_counters;
+
+ clone_counters(info, private);
- /* First, sum counters... */
- write_lock_bh(&table->lock);
- get_counters(private, counters);
- write_unlock_bh(&table->lock);
+ mutex_lock(&table->lock);
+ xt_table_entry_swap_rcu(private, info);
+ synchronize_net(); /* Wait until smoke has cleared */
+
+ get_counters(info, counters);
+ put_counters(private, counters);
+ mutex_unlock(&table->lock);
+
+ xt_free_table_info(info);
return counters;
+
+ free_counters:
+ vfree(counters);
+ nomem:
+ return ERR_PTR(-ENOMEM);
}
static int
@@ -1312,27 +1377,6 @@ do_replace(struct net *net, void __user *user, unsigned int len)
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct ipt_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
-#if 0
- duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
- *i,
- (long unsigned int)e->counters.pcnt,
- (long unsigned int)e->counters.bcnt,
- (long unsigned int)addme[*i].pcnt,
- (long unsigned int)addme[*i].bcnt);
-#endif
-
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
@@ -1393,13 +1437,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
goto free;
}
- write_lock_bh(&t->lock);
+ mutex_lock(&t->lock);
private = t->private;
if (private->number != num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
+ preempt_disable();
i = 0;
/* Choose the copy that is on our node */
loc_cpu_entry = private->entries[raw_smp_processor_id()];
@@ -1408,8 +1453,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
add_counter_to_entry,
paddc,
&i);
+ preempt_enable();
unlock_up_free:
- write_unlock_bh(&t->lock);
+ mutex_unlock(&t->lock);
xt_table_unlock(t);
module_put(t->me);
free:
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 27a78fbbd92b..acc44c69eb68 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -464,7 +464,7 @@ static struct xt_target log_tg_reg __read_mostly = {
.me = THIS_MODULE,
};
-static const struct nf_logger ipt_log_logger ={
+static struct nf_logger ipt_log_logger __read_mostly = {
.name = "ipt_LOG",
.logfn = &ipt_log_packet,
.me = THIS_MODULE,
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
deleted file mode 100644
index 6d76aae90cc0..000000000000
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/* TTL modification target for IP tables
- * (C) 2000,2005 by Harald Welte <laforge@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ipt_TTL.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: IPv4 TTL field modification target");
-MODULE_LICENSE("GPL");
-
-static unsigned int
-ttl_tg(struct sk_buff *skb, const struct xt_target_param *par)
-{
- struct iphdr *iph;
- const struct ipt_TTL_info *info = par->targinfo;
- int new_ttl;
-
- if (!skb_make_writable(skb, skb->len))
- return NF_DROP;
-
- iph = ip_hdr(skb);
-
- switch (info->mode) {
- case IPT_TTL_SET:
- new_ttl = info->ttl;
- break;
- case IPT_TTL_INC:
- new_ttl = iph->ttl + info->ttl;
- if (new_ttl > 255)
- new_ttl = 255;
- break;
- case IPT_TTL_DEC:
- new_ttl = iph->ttl - info->ttl;
- if (new_ttl < 0)
- new_ttl = 0;
- break;
- default:
- new_ttl = iph->ttl;
- break;
- }
-
- if (new_ttl != iph->ttl) {
- csum_replace2(&iph->check, htons(iph->ttl << 8),
- htons(new_ttl << 8));
- iph->ttl = new_ttl;
- }
-
- return XT_CONTINUE;
-}
-
-static bool ttl_tg_check(const struct xt_tgchk_param *par)
-{
- const struct ipt_TTL_info *info = par->targinfo;
-
- if (info->mode > IPT_TTL_MAXMODE) {
- printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
- info->mode);
- return false;
- }
- if (info->mode != IPT_TTL_SET && info->ttl == 0)
- return false;
- return true;
-}
-
-static struct xt_target ttl_tg_reg __read_mostly = {
- .name = "TTL",
- .family = NFPROTO_IPV4,
- .target = ttl_tg,
- .targetsize = sizeof(struct ipt_TTL_info),
- .table = "mangle",
- .checkentry = ttl_tg_check,
- .me = THIS_MODULE,
-};
-
-static int __init ttl_tg_init(void)
-{
- return xt_register_target(&ttl_tg_reg);
-}
-
-static void __exit ttl_tg_exit(void)
-{
- xt_unregister_target(&ttl_tg_reg);
-}
-
-module_init(ttl_tg_init);
-module_exit(ttl_tg_exit);
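
The TTL rewrite itself, now provided by the generic xt HL target selected in the Kconfig hunk above, relies on the RFC 1624 incremental checksum update that csum_replace2() performs; since TTL shares a 16-bit header word with the protocol field, old and new values are passed as ttl << 8. A stand-alone sketch of that fix-up (the sample checksum is illustrative):

#include <stdio.h>
#include <stdint.h>

/* HC' = ~(~HC + ~m + m') -- RFC 1624 incremental update of the
 * one's-complement IP header checksum when one 16-bit word changes */
static uint16_t csum_fixup(uint16_t check, uint16_t old_w, uint16_t new_w)
{
        uint32_t sum = (uint16_t)~check + (uint16_t)~old_w + new_w;

        sum = (sum & 0xffff) + (sum >> 16);     /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint16_t check = 0xb1e6;        /* illustrative header checksum */
        uint8_t ttl = 64;

        check = csum_fixup(check, ttl << 8, (ttl - 1) << 8);
        printf("checksum after TTL 64 -> 63: 0x%04x\n", check);
        return 0;
}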
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 18a2826b57c6..d32cc4bb328a 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -379,7 +379,7 @@ static struct xt_target ulog_tg_reg __read_mostly = {
.me = THIS_MODULE,
};
-static struct nf_logger ipt_ulog_logger = {
+static struct nf_logger ipt_ulog_logger __read_mostly = {
.name = "ipt_ULOG",
.logfn = ipt_logfn,
.me = THIS_MODULE,
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
deleted file mode 100644
index 297f1cbf4ff5..000000000000
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/* IP tables module for matching the value of the TTL
- *
- * (C) 2000,2001 by Harald Welte <laforge@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/ip.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv4/ipt_ttl.h>
-#include <linux/netfilter/x_tables.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: IPv4 TTL field match");
-MODULE_LICENSE("GPL");
-
-static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct ipt_ttl_info *info = par->matchinfo;
- const u8 ttl = ip_hdr(skb)->ttl;
-
- switch (info->mode) {
- case IPT_TTL_EQ:
- return ttl == info->ttl;
- case IPT_TTL_NE:
- return ttl != info->ttl;
- case IPT_TTL_LT:
- return ttl < info->ttl;
- case IPT_TTL_GT:
- return ttl > info->ttl;
- default:
- printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
- info->mode);
- return false;
- }
-
- return false;
-}
-
-static struct xt_match ttl_mt_reg __read_mostly = {
- .name = "ttl",
- .family = NFPROTO_IPV4,
- .match = ttl_mt,
- .matchsize = sizeof(struct ipt_ttl_info),
- .me = THIS_MODULE,
-};
-
-static int __init ttl_mt_init(void)
-{
- return xt_register_match(&ttl_mt_reg);
-}
-
-static void __exit ttl_mt_exit(void)
-{
- xt_unregister_match(&ttl_mt_reg);
-}
-
-module_init(ttl_mt_init);
-module_exit(ttl_mt_exit);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 52cb6939d093..c30a969724f8 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -56,7 +56,6 @@ static struct
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 3929d20b9e45..4087614d9519 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -67,7 +67,6 @@ static struct
static struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 7f65d18333e3..e5356da1fb54 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -39,7 +39,6 @@ static struct
static struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index a52a35f4a584..29ab630f240a 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -60,7 +60,6 @@ static struct
static struct xt_table security_table = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(security_table.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 4beb04fac588..7d2ead7228ac 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -120,8 +120,10 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
typeof(nf_nat_seq_adjust_hook) seq_adjust;
seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
- if (!seq_adjust || !seq_adjust(skb, ct, ctinfo))
+ if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return NF_DROP;
+ }
}
out:
/* We've seen it coming out the other side: confirm it */
@@ -326,6 +328,11 @@ static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
+
+static int ipv4_nlattr_tuple_size(void)
+{
+ return nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1);
+}
#endif
static struct nf_sockopt_ops so_getorigdst = {
@@ -345,6 +352,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
.get_l4proto = ipv4_get_l4proto,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = ipv4_tuple_to_nlattr,
+ .nlattr_tuple_size = ipv4_nlattr_tuple_size,
.nlattr_to_tuple = ipv4_nlattr_to_tuple,
.nla_policy = ipv4_nla_policy,
#endif
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 6ba5c557690c..8668a3defda6 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -25,40 +25,42 @@ struct ct_iter_state {
unsigned int bucket;
};
-static struct hlist_node *ct_get_first(struct seq_file *seq)
+static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
for (st->bucket = 0;
st->bucket < nf_conntrack_htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
- if (n)
+ if (!is_a_nulls(n))
return n;
}
return NULL;
}
-static struct hlist_node *ct_get_next(struct seq_file *seq,
- struct hlist_node *head)
+static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
+ struct hlist_nulls_node *head)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
head = rcu_dereference(head->next);
- while (head == NULL) {
- if (++st->bucket >= nf_conntrack_htable_size)
- return NULL;
+ while (is_a_nulls(head)) {
+ if (likely(get_nulls_value(head) == st->bucket)) {
+ if (++st->bucket >= nf_conntrack_htable_size)
+ return NULL;
+ }
head = rcu_dereference(net->ct.hash[st->bucket].first);
}
return head;
}
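
The nulls-list walk above works because the chain terminator is not NULL but an odd "nulls" pointer encoding the bucket id; a lockless reader that got moved to another chain mid-walk sees a mismatched value and re-reads its own bucket. The encoding, sketched in user space:

#include <stdio.h>
#include <stdint.h>

/* bit 0 set marks a nulls terminator; the payload sits in the upper bits */
static int is_a_nulls(const void *p)
{
        return (uintptr_t)p & 1;
}

static unsigned long get_nulls_value(const void *p)
{
        return (uintptr_t)p >> 1;
}

int main(void)
{
        void *end_of_bucket_7 = (void *)((7UL << 1) | 1);

        if (is_a_nulls(end_of_bucket_7))
                printf("chain ends in bucket %lu\n",
                       get_nulls_value(end_of_bucket_7));
        return 0;
}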
-static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
+static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
- struct hlist_node *head = ct_get_first(seq);
+ struct hlist_nulls_node *head = ct_get_first(seq);
if (head)
while (pos && (head = ct_get_next(seq, head)))
@@ -87,69 +89,76 @@ static void ct_seq_stop(struct seq_file *s, void *v)
static int ct_seq_show(struct seq_file *s, void *v)
{
- const struct nf_conntrack_tuple_hash *hash = v;
- const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
+ struct nf_conntrack_tuple_hash *hash = v;
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
+ int ret = 0;
NF_CT_ASSERT(ct);
+ if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ return 0;
+
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
- return 0;
+ goto release;
if (nf_ct_l3num(ct) != AF_INET)
- return 0;
+ goto release;
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
NF_CT_ASSERT(l3proto);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
NF_CT_ASSERT(l4proto);
+ ret = -ENOSPC;
if (seq_printf(s, "%-8s %u %ld ",
l4proto->name, nf_ct_protonum(ct),
timer_pending(&ct->timeout)
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
- return -ENOSPC;
+ goto release;
if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
- return -ENOSPC;
+ goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l3proto, l4proto))
- return -ENOSPC;
+ goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
- return -ENOSPC;
+ goto release;
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
if (seq_printf(s, "[UNREPLIED] "))
- return -ENOSPC;
+ goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
l3proto, l4proto))
- return -ENOSPC;
+ goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
- return -ENOSPC;
+ goto release;
if (test_bit(IPS_ASSURED_BIT, &ct->status))
if (seq_printf(s, "[ASSURED] "))
- return -ENOSPC;
+ goto release;
#ifdef CONFIG_NF_CONNTRACK_MARK
if (seq_printf(s, "mark=%u ", ct->mark))
- return -ENOSPC;
+ goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if (seq_printf(s, "secmark=%u ", ct->secmark))
- return -ENOSPC;
+ goto release;
#endif
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
- return -ENOSPC;
-
- return 0;
+ goto release;
+ ret = 0;
+release:
+ nf_ct_put(ct);
+ return ret;
}
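
The show path now pins the entry with atomic_inc_not_zero() before touching it and drops the reference at a single release label. A user-space analogue of that take-if-still-live pattern, using C11 atomics (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int use; };

/* like atomic_inc_not_zero(): take a reference only while use > 0;
 * a zero count means the object is already on its way to being freed */
static int get_ref(struct obj *o)
{
        int v = atomic_load(&o->use);

        while (v != 0)
                if (atomic_compare_exchange_weak(&o->use, &v, v + 1))
                        return 1;
        return 0;
}

int main(void)
{
        struct obj o = { .use = 1 };

        if (get_ref(&o)) {
                puts("shown");                  /* safe to dereference here */
                atomic_fetch_sub(&o.use, 1);    /* nf_ct_put() analogue */
        }
        return 0;
}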
static const struct seq_operations ct_seq_ops = {
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 2a8bee26f43d..23b2c2ee869a 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -262,6 +262,11 @@ static int icmp_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
+
+static int icmp_nlattr_tuple_size(void)
+{
+ return nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1);
+}
#endif
#ifdef CONFIG_SYSCTL
@@ -309,6 +314,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
.me = NULL,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = icmp_tuple_to_nlattr,
+ .nlattr_tuple_size = icmp_nlattr_tuple_size,
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index a65cf692359f..fe65187810f0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -679,7 +679,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
static int __net_init nf_nat_net_init(struct net *net)
{
net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
- &net->ipv4.nat_vmalloced);
+ &net->ipv4.nat_vmalloced, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index a7eb04719044..6348a793936e 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,6 @@ static struct
static struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(nat_table.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 182f845de92f..d9521f6f9ed0 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1292,7 +1292,7 @@ static struct nf_conntrack_helper snmp_helper __read_mostly = {
.expect_policy = &snmp_exp_policy,
.name = "snmp",
.tuple.src.l3num = AF_INET,
- .tuple.src.u.udp.port = __constant_htons(SNMP_PORT),
+ .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
};
@@ -1302,7 +1302,7 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
.expect_policy = &snmp_exp_policy,
.name = "snmp_trap",
.tuple.src.l3num = AF_INET,
- .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT),
+ .tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
};
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index eb62e58bff79..cf0cdeeb1db0 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -54,8 +54,8 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
int orphans, sockets;
local_bh_disable();
- orphans = percpu_counter_sum_positive(&tcp_orphan_count),
- sockets = percpu_counter_sum_positive(&tcp_sockets_allocated),
+ orphans = percpu_counter_sum_positive(&tcp_orphan_count);
+ sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
local_bh_enable();
socket_seq_show(seq);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dff8bc4e0fac..f774651f0a47 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -493,6 +493,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.addr = inet->saddr;
ipc.opt = NULL;
+ ipc.shtx.flags = 0;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 97f71153584f..5caee609be06 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -151,7 +151,7 @@ static void rt_emergency_hash_rebuild(struct net *net);
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
- .protocol = __constant_htons(ETH_P_IP),
+ .protocol = cpu_to_be16(ETH_P_IP),
.gc = rt_garbage_collect,
.check = ipv4_dst_check,
.destroy = ipv4_dst_destroy,
@@ -2696,7 +2696,7 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
- .protocol = __constant_htons(ETH_P_IP),
+ .protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
.check = ipv4_dst_check,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
@@ -2779,7 +2779,8 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
return ip_route_output_flow(net, rp, flp, NULL, 0);
}
-static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
+static int rt_fill_info(struct net *net,
+ struct sk_buff *skb, u32 pid, u32 seq, int event,
int nowait, unsigned int flags)
{
struct rtable *rt = skb->rtable;
@@ -2844,8 +2845,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
__be32 dst = rt->rt_dst;
if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
- int err = ipmr_get_route(skb, r, nowait);
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ int err = ipmr_get_route(net, skb, r, nowait);
if (err <= 0) {
if (!nowait) {
if (err == 0)
@@ -2950,7 +2951,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
- err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+ err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
RTM_NEWROUTE, 0, 0);
if (err <= 0)
goto errout_free;
@@ -2988,7 +2989,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (rt_is_expired(rt))
continue;
skb->dst = dst_clone(&rt->u.dst);
- if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
dst_release(xchg(&skb->dst, NULL));
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 76b148bcb0dc..2451aeb5ac23 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -661,6 +661,47 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
return NULL;
}
+static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+ int large_allowed)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 xmit_size_goal, old_size_goal;
+
+ xmit_size_goal = mss_now;
+
+ if (large_allowed && sk_can_gso(sk)) {
+ xmit_size_goal = ((sk->sk_gso_max_size - 1) -
+ inet_csk(sk)->icsk_af_ops->net_header_len -
+ inet_csk(sk)->icsk_ext_hdr_len -
+ tp->tcp_header_len);
+
+ xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
+
+ /* We try hard to avoid divides here */
+ old_size_goal = tp->xmit_size_goal_segs * mss_now;
+
+ if (likely(old_size_goal <= xmit_size_goal &&
+ old_size_goal + mss_now > xmit_size_goal)) {
+ xmit_size_goal = old_size_goal;
+ } else {
+ tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+ xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+ }
+ }
+
+ return max(xmit_size_goal, mss_now);
+}
+
+static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+{
+ int mss_now;
+
+ mss_now = tcp_current_mss(sk);
+ *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
+
+ return mss_now;
+}
+
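
tcp_xmit_size_goal() caches the goal in whole segments and divides only when the cached value drifts more than one MSS below the freshly computed bound. A worked sketch of that reuse test (the numbers are illustrative):

#include <stdio.h>

int main(void)
{
        unsigned int mss = 1448, cached_segs = 30;
        unsigned int bound = 44000;             /* freshly computed goal, bytes */
        unsigned int old = cached_segs * mss;   /* 43440 */

        if (old <= bound && old + mss > bound) {
                printf("reuse %u (no divide)\n", old);
        } else {
                cached_segs = bound / mss;      /* the rare divide */
                printf("recompute %u\n", cached_segs * mss);
        }
        return 0;
}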
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
size_t psize, int flags)
{
@@ -677,13 +718,12 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto do_error;
+ goto out_err;
while (psize > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
@@ -761,8 +801,7 @@ wait_for_memory:
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
}
out:
@@ -844,8 +883,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
/* This should be in poll */
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Ok commence sending. */
iovlen = msg->msg_iovlen;
@@ -854,7 +892,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto do_error;
+ goto out_err;
while (--iovlen >= 0) {
int seglen = iov->iov_len;
@@ -1007,8 +1045,7 @@ wait_for_memory:
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
}
}
@@ -1045,8 +1082,7 @@ out_err:
*/
static int tcp_recv_urg(struct sock *sk, long timeo,
- struct msghdr *msg, int len, int flags,
- int *addr_len)
+ struct msghdr *msg, int len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1661,7 +1697,7 @@ out:
return err;
recv_urg:
- err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
+ err = tcp_recv_urg(sk, timeo, msg, len, flags);
goto out;
}
@@ -2478,23 +2514,23 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct tcphdr *th2;
unsigned int thlen;
unsigned int flags;
- unsigned int total;
unsigned int mss = 1;
int flush = 1;
+ int i;
- if (!pskb_may_pull(skb, sizeof(*th)))
+ th = skb_gro_header(skb, sizeof(*th));
+ if (unlikely(!th))
goto out;
- th = tcp_hdr(skb);
thlen = th->doff * 4;
if (thlen < sizeof(*th))
goto out;
- if (!pskb_may_pull(skb, thlen))
+ th = skb_gro_header(skb, thlen);
+ if (unlikely(!th))
goto out;
- th = tcp_hdr(skb);
- __skb_pull(skb, thlen);
+ skb_gro_pull(skb, thlen);
flags = tcp_flag_word(th);
@@ -2504,7 +2540,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
th2 = tcp_hdr(p);
- if (th->source != th2->source || th->dest != th2->dest) {
+ if ((th->source ^ th2->source) | (th->dest ^ th2->dest)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
@@ -2519,14 +2555,15 @@ found:
flush |= flags & TCP_FLAG_CWR;
flush |= (flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
- flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
- flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
+ flush |= (th->ack_seq ^ th2->ack_seq) | (th->window ^ th2->window);
+ for (i = sizeof(*th); !flush && i < thlen; i += 4)
+ flush |= *(u32 *)((u8 *)th + i) ^
+ *(u32 *)((u8 *)th2 + i);
- total = p->len;
mss = skb_shinfo(p)->gso_size;
- flush |= skb->len > mss || skb->len <= 0;
- flush |= ntohl(th2->seq) + total != ntohl(th->seq);
+ flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb);
+ flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
if (flush || skb_gro_receive(head, skb)) {
mss = 1;
@@ -2538,7 +2575,7 @@ found:
tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
out_check_final:
- flush = skb->len < mss;
+ flush = skb_gro_len(skb) < mss;
flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
TCP_FLAG_SYN | TCP_FLAG_FIN);
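
The rewritten checks build "flush" branchlessly: each equality test becomes an XOR, the results are OR-ed together, and one nonzero value at the end means some header field differed. In miniature:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t sport1 = 80, sport2 = 80, dport1 = 5000, dport2 = 5001;
        uint32_t flush = 0;

        /* no conditional branches; any differing bit survives the OR */
        flush |= (sport1 ^ sport2) | (dport1 ^ dport2);
        printf("flush=%u -> %s\n", flush,
               flush ? "different flows" : "same flow");
        return 0;
}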
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 7eb7636db0d0..3b53fd1af23f 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -149,16 +149,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
tcp_slow_start(tp);
else {
bictcp_update(ca, tp->snd_cwnd);
-
- /* In dangerous area, increase slowly.
- * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
- */
- if (tp->snd_cwnd_cnt >= ca->cnt) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- } else
- tp->snd_cwnd_cnt++;
+ tcp_cong_avoid_ai(tp, ca->cnt);
}
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4ec5b4e97c4e..e92beb9e55e0 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -336,6 +336,19 @@ void tcp_slow_start(struct tcp_sock *tp)
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+{
+ if (tp->snd_cwnd_cnt >= w) {
+ if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+ tp->snd_cwnd++;
+ tp->snd_cwnd_cnt = 0;
+ } else {
+ tp->snd_cwnd_cnt++;
+ }
+}
+EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
+
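
tcp_cong_avoid_ai() is the classic additive increase, cwnd += 1/w per ACK, done with an integer counter so no fraction is ever stored. A direct user-space transcription (the clamp against snd_cwnd_clamp is elided):

#include <stdio.h>

static void cong_avoid_ai(unsigned int *cwnd, unsigned int *cnt, unsigned int w)
{
        if (*cnt >= w) {
                (*cwnd)++;      /* one full window of ACKs seen: grow by 1 */
                *cnt = 0;
        } else {
                (*cnt)++;
        }
}

int main(void)
{
        unsigned int cwnd = 10, cnt = 0, ack;

        for (ack = 0; ack < 33; ack++)
                cong_avoid_ai(&cwnd, &cnt, cwnd);       /* Reno: w == cwnd */
        printf("cwnd after 33 acks: %u\n", cwnd);       /* ~1 per window */
        return 0;
}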
/*
* TCP Reno congestion control
* This is special case used for fallback as well.
@@ -365,13 +378,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
tp->snd_cwnd++;
}
} else {
- /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- } else
- tp->snd_cwnd_cnt++;
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd);
}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ee467ec40c4f..71d5f2f29fa6 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -294,16 +294,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
tcp_slow_start(tp);
} else {
bictcp_update(ca, tp->snd_cwnd);
-
- /* In dangerous area, increase slowly.
- * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
- */
- if (tp->snd_cwnd_cnt >= ca->cnt) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- } else
- tp->snd_cwnd_cnt++;
+ tcp_cong_avoid_ai(tp, ca->cnt);
}
}
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 937549b8a921..26d5c7fc7de5 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -115,8 +115,7 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt
return;
/* achieved throughput calculations */
- if (icsk->icsk_ca_state != TCP_CA_Open &&
- icsk->icsk_ca_state != TCP_CA_Disorder) {
+ if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {
ca->packetcount = 0;
ca->lasttime = now;
return;
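
The hunk above collapses two state equality tests into one mask test: (1 << state) & set is nonzero exactly when the state is in the set. A tiny demonstration:

#include <stdio.h>

enum { CA_Open, CA_Disorder, CA_CWR, CA_Recovery, CA_Loss };

int main(void)
{
        unsigned int counting_states = (1 << CA_Open) | (1 << CA_Disorder);
        int state = CA_Recovery;

        printf("%s\n", ((1 << state) & counting_states)
               ? "keep counting" : "reset throughput sample");
        return 0;
}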
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c28976a7e596..2bc8e27a163d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -64,6 +64,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
+#include <linux/kernel.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
@@ -1178,10 +1179,18 @@ static void tcp_mark_lost_retrans(struct sock *sk)
if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
continue;
- if (after(received_upto, ack_seq) &&
- (tcp_is_fack(tp) ||
- !before(received_upto,
- ack_seq + tp->reordering * tp->mss_cache))) {
+ /* TODO: We would like to get rid of tcp_is_fack(tp) only
+ * constraint here (see above) but figuring out that at
+ * least tp->reordering SACK blocks reside between ack_seq
+ * and received_upto is not easy task to do cheaply with
+ * the available datastructures.
+ *
+ * Whether FACK should check here for tp->reordering segs
+ * in-between one could argue for either way (it would be
+ * rather simple to implement as we could count fack_count
+ * during the walk and do tp->fackets_out - fack_count).
+ */
+ if (after(received_upto, ack_seq)) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
@@ -1794,11 +1803,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
for (i = used_sacks - 1; i > 0; i--) {
for (j = 0; j < i; j++) {
if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
- struct tcp_sack_block tmp;
-
- tmp = sp[j];
- sp[j] = sp[j + 1];
- sp[j + 1] = tmp;
+ swap(sp[j], sp[j + 1]);
/* Track where the first SACK block goes to */
if (j == first_sack_index)
@@ -2453,6 +2458,44 @@ static int tcp_time_to_recover(struct sock *sk)
return 0;
}
+/* New heuristics: this is possible only after we switched to restarting the
+ * timer each time something is ACKed. Hence, we can detect timed-out packets
+ * during fast retransmit without falling back to slow start.
+ *
+ * The usefulness of this as-is is very questionable, since we should know which of
+ * the segments is the next to timeout which is relatively expensive to find
+ * in general case unless we add some data structure just for that. The
+ * current approach certainly won't find the right one too often and when it
+ * finally does find _something_ it usually marks large part of the window
+ * right away (because a retransmission with a larger timestamp blocks the
+ * loop from advancing). -ij
+ */
+static void tcp_timeout_skbs(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+
+ if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
+ return;
+
+ skb = tp->scoreboard_skb_hint;
+ if (tp->scoreboard_skb_hint == NULL)
+ skb = tcp_write_queue_head(sk);
+
+ tcp_for_write_queue_from(skb, sk) {
+ if (skb == tcp_send_head(sk))
+ break;
+ if (!tcp_skb_timedout(sk, skb))
+ break;
+
+ tcp_skb_mark_lost(tp, skb);
+ }
+
+ tp->scoreboard_skb_hint = skb;
+
+ tcp_verify_left_out(tp);
+}
+
/* Mark head of queue up as lost. With RFC3517 SACK, the count of lost packets
 * is against sacked "cnt", otherwise it's against facked "cnt"
*/
@@ -2525,30 +2568,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
tcp_mark_head_lost(sk, sacked_upto);
}
- /* New heuristics: it is possible only after we switched
- * to restart timer each time when something is ACKed.
- * Hence, we can detect timed out packets during fast
- * retransmit without falling to slow start.
- */
- if (tcp_is_fack(tp) && tcp_head_timedout(sk)) {
- struct sk_buff *skb;
-
- skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
- : tcp_write_queue_head(sk);
-
- tcp_for_write_queue_from(skb, sk) {
- if (skb == tcp_send_head(sk))
- break;
- if (!tcp_skb_timedout(sk, skb))
- break;
-
- tcp_skb_mark_lost(tp, skb);
- }
-
- tp->scoreboard_skb_hint = skb;
-
- tcp_verify_left_out(tp);
- }
+ tcp_timeout_skbs(sk);
}
/* CWND moderation, preventing bursts due to too big ACKs
@@ -2813,7 +2833,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
icsk->icsk_mtup.probe_size = 0;
}
-static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
+static void tcp_mtup_probe_success(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2841,7 +2861,7 @@ void tcp_simple_retransmit(struct sock *sk)
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- unsigned int mss = tcp_current_mss(sk, 0);
+ unsigned int mss = tcp_current_mss(sk);
u32 prior_lost = tp->lost_out;
tcp_for_write_queue(skb, sk) {
@@ -3178,7 +3198,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
- u32 end_seq;
u32 acked_pcount;
u8 sacked = scb->sacked;
@@ -3193,16 +3212,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
break;
fully_acked = 0;
- end_seq = tp->snd_una;
} else {
acked_pcount = tcp_skb_pcount(skb);
- end_seq = scb->end_seq;
- }
-
- /* MTU probing checks */
- if (fully_acked && icsk->icsk_mtup.probe_size &&
- !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) {
- tcp_mtup_probe_success(sk, skb);
}
if (sacked & TCPCB_RETRANS) {
@@ -3267,24 +3278,26 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
const struct tcp_congestion_ops *ca_ops
= inet_csk(sk)->icsk_ca_ops;
+ if (unlikely(icsk->icsk_mtup.probe_size &&
+ !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
+ tcp_mtup_probe_success(sk);
+ }
+
tcp_ack_update_rtt(sk, flag, seq_rtt);
tcp_rearm_rto(sk);
if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);
} else {
+ int delta;
+
/* Non-retransmitted hole got filled? That's reordering */
if (reord < prior_fackets)
tcp_update_reordering(sk, tp->fackets_out - reord, 0);
- /* No need to care for underflows here because
- * the lost_skb_hint gets NULLed if we're past it
- * (or something non-trivial happened)
- */
- if (tcp_is_fack(tp))
- tp->lost_cnt_hint -= pkts_acked;
- else
- tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
+ delta = tcp_is_fack(tp) ? pkts_acked :
+ prior_sacked - tp->sacked_out;
+ tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
}
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
@@ -3396,7 +3409,7 @@ static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
flag |= FLAG_WIN_UPDATE;
- tcp_update_wl(tp, ack, ack_seq);
+ tcp_update_wl(tp, ack_seq);
if (tp->snd_wnd != nwin) {
tp->snd_wnd = nwin;
@@ -3572,15 +3585,18 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
int prior_packets;
int frto_cwnd = 0;
- /* If the ack is newer than sent or older than previous acks
+ /* If the ack is older than previous acks
* then we can probably ignore it.
*/
- if (after(ack, tp->snd_nxt))
- goto uninteresting_ack;
-
if (before(ack, prior_snd_una))
goto old_ack;
+ /* If the ack includes data we haven't sent yet, discard
+ * this segment (RFC793 Section 3.9).
+ */
+ if (after(ack, tp->snd_nxt))
+ goto invalid_ack;
+
if (after(ack, prior_snd_una))
flag |= FLAG_SND_UNA_ADVANCED;
@@ -3601,7 +3617,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
* No more checks are required.
* Note, we use the fact that SND.UNA>=SND.WL2.
*/
- tcp_update_wl(tp, ack, ack_seq);
+ tcp_update_wl(tp, ack_seq);
tp->snd_una = ack;
flag |= FLAG_WIN_UPDATE;
@@ -3670,6 +3686,10 @@ no_queue:
tcp_ack_probe(sk);
return 1;
+invalid_ack:
+ SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+ return -1;
+
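
The reordering above checks old ACKs first and only then discards ACKs for data not yet sent, as RFC 793 section 3.9 requires; both tests use the kernel's wrap-safe before()/after() comparisons. A compact sketch of that logic:

#include <stdio.h>
#include <stdint.h>

/* wrap-safe sequence comparison: signed 32-bit subtraction keeps the
 * ordering correct across sequence-number wraparound */
static int before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}
#define after(a, b) before(b, a)

int main(void)
{
        uint32_t snd_una = 0xfffffff0u, snd_nxt = 0x10, ack = 0x20;

        if (before(ack, snd_una))
                puts("old ack");
        else if (after(ack, snd_nxt))
                puts("invalid ack: discard (RFC 793)");
        else
                puts("acceptable ack");
        return 0;
}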
old_ack:
if (TCP_SKB_CB(skb)->sacked) {
tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3677,8 +3697,7 @@ old_ack:
tcp_try_keep_open(sk);
}
-uninteresting_ack:
- SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+ SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
return 0;
}
@@ -3866,8 +3885,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
* Not only, also it occurs for expired timestamps.
*/
- if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
- get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+ if (tcp_paws_check(&tp->rx_opt, 0))
tcp_store_ts_recent(tp);
}
}
@@ -3919,9 +3937,9 @@ static inline int tcp_paws_discard(const struct sock *sk,
const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
- return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
- get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
- !tcp_disordered_ack(sk, skb));
+
+ return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
+ !tcp_disordered_ack(sk, skb);
}
/* Check segment sequence number for validity.
@@ -4079,7 +4097,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
tp->rx_opt.dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
tp->duplicate_sack[0].end_seq = end_seq;
- tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
}
}
@@ -4134,8 +4151,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
* Decrease num_sacks.
*/
tp->rx_opt.num_sacks--;
- tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
- tp->rx_opt.dsack;
for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
sp[i] = sp[i + 1];
continue;
@@ -4144,20 +4159,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
}
}
-static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
- struct tcp_sack_block *sack2)
-{
- __u32 tmp;
-
- tmp = sack1->start_seq;
- sack1->start_seq = sack2->start_seq;
- sack2->start_seq = tmp;
-
- tmp = sack1->end_seq;
- sack1->end_seq = sack2->end_seq;
- sack2->end_seq = tmp;
-}
-
static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4172,7 +4173,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
if (tcp_sack_extend(sp, seq, end_seq)) {
/* Rotate this_sack to the first one. */
for (; this_sack > 0; this_sack--, sp--)
- tcp_sack_swap(sp, sp - 1);
+ swap(*sp, *(sp - 1));
if (cur_sacks > 1)
tcp_sack_maybe_coalesce(tp);
return;
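
The local tcp_sack_swap() helper is dropped in favor of the generic swap() macro from <linux/kernel.h>, which swaps any two lvalues of one type, whole structs included, via a typeof temporary. The pattern, sketched (SWAP is a stand-in name, not the kernel macro itself):

/* A typeof()-based temporary lets one macro swap any two lvalues of
 * the same type, including whole structs such as tcp_sack_block.
 */
#define SWAP(a, b) \
	do { __typeof__(a) _tmp = (a); (a) = (b); (b) = _tmp; } while (0)
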
@@ -4198,7 +4199,6 @@ new_sack:
sp->start_seq = seq;
sp->end_seq = end_seq;
tp->rx_opt.num_sacks++;
- tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
}
/* RCV.NXT advances, some SACKs should be eaten. */
@@ -4212,7 +4212,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
if (skb_queue_empty(&tp->out_of_order_queue)) {
tp->rx_opt.num_sacks = 0;
- tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
return;
}
@@ -4233,11 +4232,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
this_sack++;
sp++;
}
- if (num_sacks != tp->rx_opt.num_sacks) {
- tp->rx_opt.num_sacks = num_sacks;
- tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
- tp->rx_opt.dsack;
- }
+ tp->rx_opt.num_sacks = num_sacks;
}
/* This one checks to see if we can put data from the
@@ -4313,10 +4308,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
TCP_ECN_accept_cwr(tp, skb);
- if (tp->rx_opt.dsack) {
- tp->rx_opt.dsack = 0;
- tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
- }
+ tp->rx_opt.dsack = 0;
/* Queue data for delivery to the user.
* Packets in sequence go to the receive queue.
@@ -4435,8 +4427,6 @@ drop:
/* Initial out of order segment, build 1 SACK. */
if (tcp_is_sack(tp)) {
tp->rx_opt.num_sacks = 1;
- tp->rx_opt.dsack = 0;
- tp->rx_opt.eff_sacks = 1;
tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
tp->selective_acks[0].end_seq =
TCP_SKB_CB(skb)->end_seq;
@@ -5157,7 +5147,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
*/
if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
- TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+ TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
+ !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
int tcp_header_len = tp->tcp_header_len;
/* Timestamp header prediction: tcp_header_len
@@ -5310,8 +5301,8 @@ slow_path:
return -res;
step5:
- if (th->ack)
- tcp_ack(sk, skb, FLAG_SLOWPATH);
+ if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+ goto discard;
tcp_rcv_rtt_measure_ts(sk, skb);
@@ -5409,7 +5400,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* never scaled.
*/
tp->snd_wnd = ntohs(th->window);
- tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
+ tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
if (!tp->rx_opt.wscale_ok) {
tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5510,7 +5501,7 @@ discard:
/* PAWS check. */
if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
- tcp_paws_check(&tp->rx_opt, 0))
+ tcp_paws_reject(&tp->rx_opt, 0))
goto discard_and_undo;
if (th->syn) {
@@ -5648,7 +5639,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* step 5: check the ACK field */
if (th->ack) {
- int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
+ int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
switch (sk->sk_state) {
case TCP_SYN_RECV:
@@ -5670,8 +5661,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) <<
tp->rx_opt.snd_wscale;
- tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
- TCP_SKB_CB(skb)->seq);
+ tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
/* tcp_ack considers this ACK as duplicate
* and does not calculate rtt.
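
Every tcp_init_wl()/tcp_update_wl() call in this patch drops its first argument: snd_wl1 only ever records the segment sequence number, so the extra ack parameter was dead. The pruned helpers presumably reduce to:

/* Presumed one-argument form: SND.WL1 tracks only the sequence number
 * of the segment used for the last window update.
 */
static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}
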
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cf74c416831a..d0a314879d81 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1226,15 +1226,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
- if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
- /* Some OSes (unknown ones, but I see them on web server, which
- * contains information interesting only for windows'
- * users) do not send their stamp in SYN. It is easy case.
- * We simply do not advertise TS support.
- */
- tmp_opt.saw_tstamp = 0;
- tmp_opt.tstamp_ok = 0;
- }
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
@@ -2355,7 +2346,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
- if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr,
+ if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f67effbb102b..43bbba7926ee 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -107,7 +107,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
- paws_reject = tcp_paws_check(&tmp_opt, th->rst);
+ paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
@@ -399,7 +399,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_prequeue_init(newtp);
- tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
+ tcp_init_wl(newtp, treq->rcv_isn);
newtp->srtt = 0;
newtp->mdev = TCP_TIMEOUT_INIT;
@@ -434,9 +434,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.saw_tstamp = 0;
newtp->rx_opt.dsack = 0;
- newtp->rx_opt.eff_sacks = 0;
-
newtp->rx_opt.num_sacks = 0;
+
newtp->urg_data = 0;
if (sock_flag(newsk, SOCK_KEEPOPEN))
@@ -512,7 +511,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* from another data.
*/
tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
- paws_reject = tcp_paws_check(&tmp_opt, th->rst);
+ paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index da2c3b8794f2..c1f259d2d33b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -441,10 +441,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
*ptr++ = htonl(sp[this_sack].end_seq);
}
- if (tp->rx_opt.dsack) {
- tp->rx_opt.dsack = 0;
- tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
- }
+ tp->rx_opt.dsack = 0;
}
}
@@ -550,6 +547,7 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
struct tcp_sock *tp = tcp_sk(sk);
unsigned size = 0;
+ unsigned int eff_sacks;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -568,10 +566,11 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
size += TCPOLEN_TSTAMP_ALIGNED;
}
- if (unlikely(tp->rx_opt.eff_sacks)) {
+ eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
+ if (unlikely(eff_sacks)) {
const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
opts->num_sack_blocks =
- min_t(unsigned, tp->rx_opt.eff_sacks,
+ min_t(unsigned, eff_sacks,
(remaining - TCPOLEN_SACK_BASE_ALIGNED) /
TCPOLEN_SACK_PERBLOCK);
size += TCPOLEN_SACK_BASE_ALIGNED +
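
With the cached rx_opt.eff_sacks field gone (see the tcp_input.c hunks above), the option writer recomputes the effective block count on the fly: the ordinary SACK blocks plus an optional D-SACK block. A standalone sketch of the sizing logic, assuming the usual option-space costs (4 bytes of aligned SACK base, 8 bytes per block) and remaining >= 4:

/* How many SACK blocks fit in the remaining TCP option space, with
 * eff_sacks derived on demand as num_sacks + dsack.
 */
static unsigned int sack_blocks_to_send(unsigned int num_sacks,
					unsigned int dsack,
					unsigned int remaining)
{
	unsigned int eff_sacks = num_sacks + dsack;
	unsigned int room = (remaining - 4) / 8;  /* base + per-block cost */

	return eff_sacks < room ? eff_sacks : room;
}
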
@@ -663,10 +662,14 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->urg_ptr = 0;
/* The urg_mode check is necessary for a window probe below snd_una */
- if (unlikely(tcp_urg_mode(tp) &&
- between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
- th->urg_ptr = htons(tp->snd_up - tcb->seq);
- th->urg = 1;
+ if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
+ if (before(tp->snd_up, tcb->seq + 0x10000)) {
+ th->urg_ptr = htons(tp->snd_up - tcb->seq);
+ th->urg = 1;
+ } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
+ th->urg_ptr = 0xFFFF;
+ th->urg = 1;
+ }
}
tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
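
The urgent pointer is a 16-bit offset, so a snd_up more than 64 KB ahead of this segment's sequence number previously could not be advertised at all; the rewritten block pins the pointer at 0xFFFF in that case, as long as the pointer still reaches past snd_nxt. A standalone sketch with wrap-safe comparisons (urg_ptr_for is a hypothetical name):

#include <stdint.h>

static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

/* Set *urg and return the urgent pointer for a segment starting at
 * seq: the exact offset when it fits in 16 bits, otherwise 0xFFFF
 * while the pointed-at byte is still beyond everything sent so far.
 */
static uint16_t urg_ptr_for(uint32_t seq, uint32_t snd_up,
			    uint32_t snd_nxt, int *urg)
{
	*urg = 0;
	if (!seq_before(seq, snd_up))
		return 0;
	if (seq_before(snd_up, seq + 0x10000)) {
		*urg = 1;
		return (uint16_t)(snd_up - seq);
	}
	if (seq_after(seq + 0xFFFF, snd_nxt)) {
		*urg = 1;
		return 0xFFFF;
	}
	return 0;
}
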
@@ -763,11 +766,10 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
struct sk_buff *buff;
int nsize, old_factor;
int nlen;
- u16 flags;
+ u8 flags;
BUG_ON(len > skb->len);
- tcp_clear_retrans_hints_partial(tp);
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;
@@ -850,6 +852,12 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
tcp_verify_left_out(tp);
}
tcp_adjust_fackets_out(sk, skb, diff);
+
+ if (tp->lost_skb_hint &&
+ before(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
+ (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked))
+ tp->lost_cnt_hint -= diff;
}
/* Link BUFF into the send queue. */
@@ -913,7 +921,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
* factor and mss.
*/
if (tcp_skb_pcount(skb) > 1)
- tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
+ tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
return 0;
}
@@ -974,15 +982,6 @@ void tcp_mtup_init(struct sock *sk)
icsk->icsk_mtup.probe_size = 0;
}
-/* Bound MSS / TSO packet size with the half of the window */
-static int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
-{
- if (tp->max_window && pktsize > (tp->max_window >> 1))
- return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
- else
- return pktsize;
-}
-
/* This function synchronizes snd mss to the current pmtu/exthdr set.
tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT count
@@ -1029,22 +1028,17 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
/* Compute the current effective MSS, taking SACKs and IP options,
* and even PMTU discovery events into account.
*/
-unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
+unsigned int tcp_current_mss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
u32 mss_now;
- u16 xmit_size_goal;
- int doing_tso = 0;
unsigned header_len;
struct tcp_out_options opts;
struct tcp_md5sig_key *md5;
mss_now = tp->mss_cache;
- if (large_allowed && sk_can_gso(sk))
- doing_tso = 1;
-
if (dst) {
u32 mtu = dst_mtu(dst);
if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
@@ -1062,19 +1056,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
mss_now -= delta;
}
- xmit_size_goal = mss_now;
-
- if (doing_tso) {
- xmit_size_goal = ((sk->sk_gso_max_size - 1) -
- inet_csk(sk)->icsk_af_ops->net_header_len -
- inet_csk(sk)->icsk_ext_hdr_len -
- tp->tcp_header_len);
-
- xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
- xmit_size_goal -= (xmit_size_goal % mss_now);
- }
- tp->xmit_size_goal = xmit_size_goal;
-
return mss_now;
}
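
tcp_current_mss() loses its large_allowed flag along with the cached xmit_size_goal computation, so every call site below switches to the one-argument form; the TSO size goal is presumably computed in the send path instead. For reference, the bound the removed tcp_bound_to_half_wnd() applied, as a standalone sketch:

/* Never build packets larger than half the largest window the peer
 * has advertised, with a floor derived from the 68-byte minimum IPv4
 * MTU (mirrors the helper removed above).
 */
static int bound_to_half_wnd(int pktsize, unsigned int max_window,
			     int tcp_header_len)
{
	if (max_window && pktsize > (int)(max_window >> 1)) {
		int floor = 68 - tcp_header_len;
		int half = max_window >> 1;

		return half > floor ? half : floor;
	}
	return pktsize;
}
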
@@ -1256,7 +1237,7 @@ int tcp_may_send_now(struct sock *sk)
struct sk_buff *skb = tcp_send_head(sk);
return (skb &&
- tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
+ tcp_snd_test(sk, skb, tcp_current_mss(sk),
(tcp_skb_is_last(sk, skb) ?
tp->nonagle : TCP_NAGLE_PUSH)));
}
@@ -1273,7 +1254,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
{
struct sk_buff *buff;
int nlen = skb->len - len;
- u16 flags;
+ u8 flags;
/* All of a TSO frame must be composed of paged data. */
if (skb->len != skb->data_len)
@@ -1352,6 +1333,10 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
if (limit >= sk->sk_gso_max_size)
goto send_now;
+ /* An skb in the middle of the queue gets no more data; is it fully sendable already? */
+ if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
+ goto send_now;
+
if (sysctl_tcp_tso_win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1405,11 +1390,11 @@ static int tcp_mtu_probe(struct sock *sk)
icsk->icsk_mtup.probe_size ||
inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
tp->snd_cwnd < 11 ||
- tp->rx_opt.eff_sacks)
+ tp->rx_opt.num_sacks || tp->rx_opt.dsack)
return -1;
/* Very simple search strategy: just double the MSS. */
- mss_now = tcp_current_mss(sk, 0);
+ mss_now = tcp_current_mss(sk);
probe_size = 2 * tp->mss_cache;
size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
@@ -1754,11 +1739,9 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
int skb_size, next_skb_size;
- u16 flags;
skb_size = skb->len;
next_skb_size = next_skb->len;
- flags = TCP_SKB_CB(skb)->flags;
BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
@@ -1778,9 +1761,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
/* Update sequence range on original skb. */
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
- /* Merge over control information. */
- flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
- TCP_SKB_CB(skb)->flags = flags;
+ /* Merge over control information. This moves PSH/FIN etc. over */
+ TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
/* All done, get rid of second SKB and account for it so
* packet counting does not break.
@@ -1894,7 +1876,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
return -EHOSTUNREACH; /* Routing failure or similar. */
- cur_mss = tcp_current_mss(sk, 0);
+ cur_mss = tcp_current_mss(sk);
/* If receiver has shrunk his window, and skb is out of
* new window, do not retransmit it. The exception is the
@@ -1908,6 +1890,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (skb->len > cur_mss) {
if (tcp_fragment(sk, skb, cur_mss, cur_mss))
return -ENOMEM; /* We'll try again later. */
+ } else {
+ tcp_init_tso_segs(sk, skb, cur_mss);
}
tcp_retrans_try_collapse(sk, skb, cur_mss);
@@ -2061,7 +2045,7 @@ begin_fwd:
goto begin_fwd;
} else if (!(sacked & TCPCB_LOST)) {
- if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS))
+ if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
hole = skb;
continue;
@@ -2100,7 +2084,7 @@ void tcp_send_fin(struct sock *sk)
* unsent frames. But be careful about outgoing SACKs
* and IP options.
*/
- mss_now = tcp_current_mss(sk, 1);
+ mss_now = tcp_current_mss(sk);
if (tcp_send_head(sk) != NULL) {
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
@@ -2325,7 +2309,7 @@ static void tcp_connect_init(struct sock *sk)
sk->sk_err = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->snd_wnd = 0;
- tcp_init_wl(tp, tp->write_seq, 0);
+ tcp_init_wl(tp, 0);
tp->snd_una = tp->write_seq;
tp->snd_sml = tp->write_seq;
tp->snd_up = tp->write_seq;
@@ -2512,7 +2496,7 @@ int tcp_write_wakeup(struct sock *sk)
if ((skb = tcp_send_head(sk)) != NULL &&
before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
int err;
- unsigned int mss = tcp_current_mss(sk, 0);
+ unsigned int mss = tcp_current_mss(sk);
unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 25524d4e372a..59f5b5e7c566 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -165,9 +165,10 @@ static int tcpprobe_sprint(char *tbuf, int n)
static ssize_t tcpprobe_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
- int error = 0, cnt = 0;
+ int error = 0;
+ size_t cnt = 0;
- if (!buf || len < 0)
+ if (!buf)
return -EINVAL;
while (cnt < len) {
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4660b088a8ce..a76513779e2b 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -24,14 +24,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp);
- else {
- tp->snd_cwnd_cnt++;
- if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- }
- }
+ else
+ tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
}
static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0170e914f1b0..b144a26359bc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -328,19 +328,16 @@ static void tcp_retransmit_timer(struct sock *sk)
if (icsk->icsk_retransmits == 0) {
int mib_idx;
- if (icsk->icsk_ca_state == TCP_CA_Disorder ||
- icsk->icsk_ca_state == TCP_CA_Recovery) {
- if (tcp_is_sack(tp)) {
- if (icsk->icsk_ca_state == TCP_CA_Recovery)
- mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
- else
- mib_idx = LINUX_MIB_TCPSACKFAILURES;
- } else {
- if (icsk->icsk_ca_state == TCP_CA_Recovery)
- mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
- else
- mib_idx = LINUX_MIB_TCPRENOFAILURES;
- }
+ if (icsk->icsk_ca_state == TCP_CA_Disorder) {
+ if (tcp_is_sack(tp))
+ mib_idx = LINUX_MIB_TCPSACKFAILURES;
+ else
+ mib_idx = LINUX_MIB_TCPRENOFAILURES;
+ } else if (icsk->icsk_ca_state == TCP_CA_Recovery) {
+ if (tcp_is_sack(tp))
+ mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
+ else
+ mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
mib_idx = LINUX_MIB_TCPLOSSFAILURES;
} else {
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index d08b2e855c22..e9bbff746488 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -159,12 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
/* In the "non-congestive state", increase cwnd
* every rtt.
*/
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- } else
- tp->snd_cwnd_cnt++;
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd);
} else {
/* In the "congestive state", increase cwnd
* every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 9ec843a9bbb2..66b6821b984e 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -94,14 +94,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
} else {
/* Reno */
-
- if (tp->snd_cwnd_cnt < tp->snd_cwnd)
- tp->snd_cwnd_cnt++;
-
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- }
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd);
}
/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
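
tcp_scalable.c, tcp_veno.c and tcp_yeah.c all replace their hand-rolled additive-increase loops with tcp_cong_avoid_ai(tp, w). Judging from the three call sites it replaces, the shared helper is presumably along these lines: cwnd grows by one segment per w acked segments, capped at snd_cwnd_clamp.

/* Presumed shape of the shared additive-increase helper, inferred from
 * the call sites above: count acked segments in snd_cwnd_cnt and add
 * one segment of cwnd once w of them have accumulated.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
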
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c47c989cb1fb..bda08a09357d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -222,7 +222,7 @@ fail:
return error;
}
-static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
@@ -596,6 +596,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return -EOPNOTSUPP;
ipc.opt = NULL;
+ ipc.shtx.flags = 0;
if (up->pending) {
/*
@@ -643,6 +644,9 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.addr = inet->saddr;
ipc.oif = sk->sk_bound_dev_if;
+ err = sock_tx_timestamp(msg, sk, &ipc.shtx);
+ if (err)
+ return err;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
@@ -1180,7 +1184,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
sk = sknext;
} while (sknext);
} else
- kfree_skb(skb);
+ consume_skb(skb);
spin_unlock(&hslot->lock);
return 0;
}
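
Switching this free from kfree_skb() to consume_skb() keeps dropped-packet monitors quiet: consume_skb() is for skbs freed in the normal course of processing, kfree_skb() for genuine drops. The convention, sketched (delivered is a hypothetical flag):

/* Freeing convention: kfree_skb() flags the free as a drop for
 * tracing and drop monitors; consume_skb() records a normal,
 * expected end of life for the buffer.
 */
if (delivered)
	consume_skb(skb);  /* freed in the normal course of processing */
else
	kfree_skb(skb);    /* a genuine drop, visible to drop monitors */
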
@@ -1614,7 +1618,8 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
if (!sk) {
- spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
+ if (state->bucket < UDP_HTABLE_SIZE)
+ spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
return udp_get_first(seq, state->bucket + 1);
}
return sk;
@@ -1632,6 +1637,9 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
+ struct udp_iter_state *state = seq->private;
+ state->bucket = UDP_HTABLE_SIZE;
+
return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
@@ -1815,6 +1823,7 @@ EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);
EXPORT_SYMBOL(udp_lib_get_port);
+EXPORT_SYMBOL(ipv4_rcv_saddr_equal);
#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 2ad24ba31f9d..60d918c96a4f 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -241,7 +241,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static struct dst_ops xfrm4_dst_ops = {
.family = AF_INET,
- .protocol = __constant_htons(ETH_P_IP),
+ .protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
.destroy = xfrm4_dst_destroy,