Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig  29
-rw-r--r--  net/ipv4/Makefile  3
-rw-r--r--  net/ipv4/af_inet.c  15
-rw-r--r--  net/ipv4/bpfilter/sockopt.c  3
-rw-r--r--  net/ipv4/devinet.c  27
-rw-r--r--  net/ipv4/esp4_offload.c  50
-rw-r--r--  net/ipv4/fib_frontend.c  89
-rw-r--r--  net/ipv4/fib_lookup.h  1
-rw-r--r--  net/ipv4/fib_semantics.c  988
-rw-r--r--  net/ipv4/fib_trie.c  33
-rw-r--r--  net/ipv4/fou.c  153
-rw-r--r--  net/ipv4/inet_connection_sock.c  4
-rw-r--r--  net/ipv4/ip_forward.c  2
-rw-r--r--  net/ipv4/ip_output.c  20
-rw-r--r--  net/ipv4/ip_tunnel_core.c  8
-rw-r--r--  net/ipv4/ip_vti.c  66
-rw-r--r--  net/ipv4/ipmr.c  21
-rw-r--r--  net/ipv4/ipmr_base.c  2
-rw-r--r--  net/ipv4/netfilter/Kconfig  20
-rw-r--r--  net/ipv4/netfilter/Makefile  2
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c  101
-rw-r--r--  net/ipv4/netfilter/nft_chain_route_ipv4.c  89
-rw-r--r--  net/ipv4/route.c  194
-rw-r--r--  net/ipv4/syncookies.c  17
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c  9
-rw-r--r--  net/ipv4/tcp.c  61
-rw-r--r--  net/ipv4/tcp_input.c  111
-rw-r--r--  net/ipv4/tcp_ipv4.c  11
-rw-r--r--  net/ipv4/tcp_metrics.c  17
-rw-r--r--  net/ipv4/tcp_minisocks.c  5
-rw-r--r--  net/ipv4/tcp_output.c  19
-rw-r--r--  net/ipv4/tcp_timer.c  3
-rw-r--r--  net/ipv4/udp.c  19
-rw-r--r--  net/ipv4/xfrm4_mode_beet.c  155
-rw-r--r--  net/ipv4/xfrm4_mode_transport.c  114
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c  152
-rw-r--r--  net/ipv4/xfrm4_output.c  27
-rw-r--r--  net/ipv4/xfrm4_policy.c  136
-rw-r--r--  net/ipv4/xfrm4_protocol.c  3
39 files changed, 1262 insertions, 1517 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 32cae39cdff6..8108e97d4285 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -304,7 +304,7 @@ config NET_IPVTI
tristate "Virtual (secure) IP: tunneling"
select INET_TUNNEL
select NET_IP_TUNNEL
- depends on INET_XFRM_MODE_TUNNEL
+ select XFRM
---help---
Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the
@@ -396,33 +396,6 @@ config INET_TUNNEL
tristate
default n
-config INET_XFRM_MODE_TRANSPORT
- tristate "IP: IPsec transport mode"
- default y
- select XFRM
- ---help---
- Support for IPsec transport mode.
-
- If unsure, say Y.
-
-config INET_XFRM_MODE_TUNNEL
- tristate "IP: IPsec tunnel mode"
- default y
- select XFRM
- ---help---
- Support for IPsec tunnel mode.
-
- If unsure, say Y.
-
-config INET_XFRM_MODE_BEET
- tristate "IP: IPsec BEET mode"
- default y
- select XFRM
- ---help---
- Support for IPsec BEET mode.
-
- If unsure, say Y.
-
config INET_DIAG
tristate "INET: socket monitoring interface"
default y
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 58629314eae9..000a61994c8f 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -37,10 +37,7 @@ obj-$(CONFIG_INET_ESP) += esp4.o
obj-$(CONFIG_INET_ESP_OFFLOAD) += esp4_offload.o
obj-$(CONFIG_INET_IPCOMP) += ipcomp.o
obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o
-obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o
obj-$(CONFIG_INET_TUNNEL) += tunnel4.o
-obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o
-obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o
obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/
obj-$(CONFIG_INET_DIAG) += inet_diag.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index eab3ebde981e..5183a2daba64 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -136,6 +136,10 @@ void inet_sock_destruct(struct sock *sk)
struct inet_sock *inet = inet_sk(sk);
__skb_queue_purge(&sk->sk_receive_queue);
+ if (sk->sk_rx_skb_cache) {
+ __kfree_skb(sk->sk_rx_skb_cache);
+ sk->sk_rx_skb_cache = NULL;
+ }
__skb_queue_purge(&sk->sk_error_queue);
sk_mem_reclaim(sk);
@@ -156,7 +160,7 @@ void inet_sock_destruct(struct sock *sk)
WARN_ON(sk->sk_forward_alloc);
kfree(rcu_dereference_protected(inet->inet_opt, 1));
- dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
+ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
dst_release(sk->sk_rx_dst);
sk_refcnt_debug_dec(sk);
}
@@ -911,12 +915,6 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct rtentry rt;
switch (cmd) {
- case SIOCGSTAMP:
- err = sock_get_timestamp(sk, (struct timeval __user *)arg);
- break;
- case SIOCGSTAMPNS:
- err = sock_get_timestampns(sk, (struct timespec __user *)arg);
- break;
case SIOCADDRT:
case SIOCDELRT:
if (copy_from_user(&rt, p, sizeof(struct rtentry)))
@@ -988,6 +986,7 @@ const struct proto_ops inet_stream_ops = {
.getname = inet_getname,
.poll = tcp_poll,
.ioctl = inet_ioctl,
+ .gettstamp = sock_gettstamp,
.listen = inet_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
@@ -1023,6 +1022,7 @@ const struct proto_ops inet_dgram_ops = {
.getname = inet_getname,
.poll = udp_poll,
.ioctl = inet_ioctl,
+ .gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
@@ -1055,6 +1055,7 @@ static const struct proto_ops inet_sockraw_ops = {
.getname = inet_getname,
.poll = datagram_poll,
.ioctl = inet_ioctl,
+ .gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
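
Note on the af_inet.c hunks above: the SIOCGSTAMP/SIOCGSTAMPNS cases dropped from inet_ioctl() are replaced by the new .gettstamp = sock_gettstamp hooks, so the timestamp ioctls are handled once in the generic socket layer instead of per protocol. A minimal sketch of that dispatch, assuming the sock_gettstamp() helper and a (sock, userstamp, timeval, time32) callback signature (neither is shown on this page):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/sockios.h>

/* Sketch only: hand SIOCGSTAMP/SIOCGSTAMPNS to proto_ops->gettstamp
 * instead of each protocol's ioctl handler; the real dispatch lives in
 * net/socket.c and may differ in detail.
 */
static int sock_ioctl_gettstamp(struct socket *sock, unsigned int cmd,
				void __user *argp)
{
	if (!sock->ops->gettstamp)
		return -ENOIOCTLCMD;

	/* third argument selects the old timeval vs. timespec layout */
	return sock->ops->gettstamp(sock, argp, cmd == SIOCGSTAMP, false);
}
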
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c
index 1e976bb93d99..15427163a041 100644
--- a/net/ipv4/bpfilter/sockopt.c
+++ b/net/ipv4/bpfilter/sockopt.c
@@ -77,5 +77,4 @@ static int __init bpfilter_sockopt_init(void)
return 0;
}
-
-module_init(bpfilter_sockopt_init);
+device_initcall(bpfilter_sockopt_init);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index eb514f312e6f..701c5d113a34 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -621,8 +621,8 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy,
- extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv4_policy, extack);
if (err < 0)
goto errout;
@@ -793,8 +793,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
struct in_device *in_dev;
int err;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy,
- extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv4_policy, extack);
if (err < 0)
goto errout;
@@ -1689,8 +1689,8 @@ static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
fillargs->flags |= NLM_F_DUMP_FILTERED;
}
- err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
- ifa_ipv4_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv4_policy, extack);
if (err < 0)
return err;
@@ -1906,7 +1906,8 @@ static int inet_validate_link_af(const struct net_device *dev,
if (dev && !__in_dev_get_rcu(dev))
return -EAFNOSUPPORT;
- err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy, NULL);
+ err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
+ inet_af_policy, NULL);
if (err < 0)
return err;
@@ -1934,7 +1935,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
if (!in_dev)
return -EAFNOSUPPORT;
- if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
+ if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
BUG();
if (tb[IFLA_INET_CONF]) {
@@ -2076,11 +2077,13 @@ static int inet_netconf_valid_get_req(struct sk_buff *skb,
}
if (!netlink_strict_get_check(skb))
- return nlmsg_parse(nlh, sizeof(struct netconfmsg), tb,
- NETCONFA_MAX, devconf_ipv4_policy, extack);
+ return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
+ tb, NETCONFA_MAX,
+ devconf_ipv4_policy, extack);
- err = nlmsg_parse_strict(nlh, sizeof(struct netconfmsg), tb,
- NETCONFA_MAX, devconf_ipv4_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
+ tb, NETCONFA_MAX,
+ devconf_ipv4_policy, extack);
if (err)
return err;
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index d3170a8001b2..8edcfa66d1e5 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -109,6 +109,44 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
xo->proto = proto;
}
+static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
+ struct sk_buff *skb,
+ netdev_features_t features)
+{
+ __skb_push(skb, skb->mac_len);
+ return skb_mac_gso_segment(skb, features);
+}
+
+static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
+ struct sk_buff *skb,
+ netdev_features_t features)
+{
+ const struct net_offload *ops;
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
+ skb->transport_header += x->props.header_len;
+ ops = rcu_dereference(inet_offloads[xo->proto]);
+ if (likely(ops && ops->callbacks.gso_segment))
+ segs = ops->callbacks.gso_segment(skb, features);
+
+ return segs;
+}
+
+static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
+ struct sk_buff *skb,
+ netdev_features_t features)
+{
+ switch (x->outer_mode.encap) {
+ case XFRM_MODE_TUNNEL:
+ return xfrm4_tunnel_gso_segment(x, skb, features);
+ case XFRM_MODE_TRANSPORT:
+ return xfrm4_transport_gso_segment(x, skb, features);
+ }
+
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -140,14 +178,16 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
skb->encap_hdr_csum = 1;
- if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
+ if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
+ !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
- else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
+ else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
+ !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
esp_features = features & ~NETIF_F_CSUM_MASK;
xo->flags |= XFRM_GSO_SEGMENT;
- return x->outer_mode->gso_segment(x, skb, esp_features);
+ return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -183,7 +223,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
if (!xo)
return -EINVAL;
- if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
+ if ((!(features & NETIF_F_HW_ESP) &&
+ !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
+ x->xso.dev != skb->dev) {
xo->flags |= CRYPTO_FALLBACK;
hw_offload = false;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index ed14ec245584..b298255f6fdb 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -307,7 +307,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
.flowi4_mark = vmark ? skb->mark : 0,
};
if (!fib_lookup(net, &fl4, &res, 0))
- return FIB_RES_PREFSRC(net, res);
+ return fib_result_prefsrc(net, &res);
} else {
scope = RT_SCOPE_LINK;
}
@@ -324,16 +324,16 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
for (ret = 0; ret < fi->fib_nhs; ret++) {
struct fib_nh *nh = &fi->fib_nh[ret];
- if (nh->nh_dev == dev) {
+ if (nh->fib_nh_dev == dev) {
dev_match = true;
break;
- } else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+ } else if (l3mdev_master_ifindex_rcu(nh->fib_nh_dev) == dev->ifindex) {
dev_match = true;
break;
}
}
#else
- if (fi->fib_nh[0].nh_dev == dev)
+ if (fi->fib_nh[0].fib_nh_dev == dev)
dev_match = true;
#endif
@@ -390,7 +390,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
dev_match = fib_info_nh_uses_dev(res.fi, dev);
if (dev_match) {
- ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
return ret;
}
if (no_addr)
@@ -402,7 +402,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
ret = 0;
if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
if (res.type == RTN_UNICAST)
- ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
}
return ret;
@@ -558,7 +558,8 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
if (rt->rt_gateway.sa_family == AF_INET && addr) {
unsigned int addr_type;
- cfg->fc_gw = addr;
+ cfg->fc_gw4 = addr;
+ cfg->fc_gw_family = AF_INET;
addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
if (rt->rt_flags & RTF_GATEWAY &&
addr_type == RTN_UNICAST)
@@ -568,7 +569,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
if (cmd == SIOCDELRT)
return 0;
- if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
+ if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw_family)
return -EINVAL;
if (cfg->fc_scope == RT_SCOPE_NOWHERE)
@@ -664,16 +665,61 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_DPORT] = { .type = NLA_U16 },
};
+int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct rtvia *via;
+ int alen;
+
+ if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
+ NL_SET_ERR_MSG(extack, "Invalid attribute length for RTA_VIA");
+ return -EINVAL;
+ }
+
+ via = nla_data(nla);
+ alen = nla_len(nla) - offsetof(struct rtvia, rtvia_addr);
+
+ switch (via->rtvia_family) {
+ case AF_INET:
+ if (alen != sizeof(__be32)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_VIA");
+ return -EINVAL;
+ }
+ cfg->fc_gw_family = AF_INET;
+ cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
+ break;
+ case AF_INET6:
+#ifdef CONFIG_IPV6
+ if (alen != sizeof(struct in6_addr)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
+ return -EINVAL;
+ }
+ cfg->fc_gw_family = AF_INET6;
+ cfg->fc_gw6 = *((struct in6_addr *)via->rtvia_addr);
+#else
+ NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel");
+ return -EINVAL;
+#endif
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported address family in RTA_VIA");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
struct nlmsghdr *nlh, struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
+ bool has_gw = false, has_via = false;
struct nlattr *attr;
int err, remaining;
struct rtmsg *rtm;
- err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy,
- extack);
+ err = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
+ rtm_ipv4_policy, extack);
if (err < 0)
goto errout;
@@ -708,12 +754,17 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
cfg->fc_oif = nla_get_u32(attr);
break;
case RTA_GATEWAY:
- cfg->fc_gw = nla_get_be32(attr);
+ has_gw = true;
+ cfg->fc_gw4 = nla_get_be32(attr);
+ if (cfg->fc_gw4)
+ cfg->fc_gw_family = AF_INET;
break;
case RTA_VIA:
- NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
- err = -EINVAL;
- goto errout;
+ has_via = true;
+ err = fib_gw_from_via(cfg, attr, extack);
+ if (err)
+ goto errout;
+ break;
case RTA_PRIORITY:
cfg->fc_priority = nla_get_u32(attr);
break;
@@ -752,6 +803,12 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
}
}
+ if (has_gw && has_via) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop configuration can not contain both GATEWAY and VIA");
+ goto errout;
+ }
+
return 0;
errout:
return err;
@@ -839,8 +896,8 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
filter->rt_type = rtm->rtm_type;
filter->table_id = rtm->rtm_table;
- err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_ipv4_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
if (err < 0)
return err;
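
For reference on the RTA_VIA support added above: the attribute payload is a struct rtvia (a 2-byte address family followed by the raw gateway address), which fib_gw_from_via() maps onto fc_gw_family and fc_gw4/fc_gw6. Below is a userspace-side sketch (not part of this patch) of packing such an attribute for an IPv6 gateway, sized to pass the length checks in fib_gw_from_via(); iproute2's `ip route add ... via inet6 <addr> ...` builds the equivalent:

#include <linux/rtnetlink.h>
#include <netinet/in.h>
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>

/* Assumes buf points at enough space for the attribute. */
static size_t pack_rta_via_inet6(void *buf, const struct in6_addr *gw)
{
	struct rtattr *rta = buf;
	struct rtvia *via = RTA_DATA(rta);

	rta->rta_type = RTA_VIA;
	/* payload = rtvia_family (2 bytes) + 16-byte IPv6 address */
	rta->rta_len = RTA_LENGTH(offsetof(struct rtvia, rtvia_addr) +
				  sizeof(*gw));
	via->rtvia_family = AF_INET6;
	memcpy(via->rtvia_addr, gw, sizeof(*gw));

	return RTA_ALIGN(rta->rta_len);	/* bytes consumed in the message */
}
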
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index e6ff282bb7f4..7945f0534db7 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -45,6 +45,7 @@ static inline void fib_result_assign(struct fib_result *res,
{
/* we used to play games with refcounts, but we now use RCU */
res->fi = fi;
+ res->nhc = fib_info_nhc(fi, 0);
}
struct fib_prop {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 8e185b5a2bf6..71c2165a2ce3 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -41,10 +41,12 @@
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
+#include <net/ip6_fib.h>
#include <net/netlink.h>
-#include <net/nexthop.h>
+#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>
+#include <net/addrconf.h>
#include "fib_lookup.h"
@@ -204,18 +206,34 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
free_percpu(rtp);
}
+void fib_nh_common_release(struct fib_nh_common *nhc)
+{
+ if (nhc->nhc_dev)
+ dev_put(nhc->nhc_dev);
+
+ lwtstate_put(nhc->nhc_lwtstate);
+}
+EXPORT_SYMBOL_GPL(fib_nh_common_release);
+
+void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
+{
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (fib_nh->nh_tclassid)
+ net->ipv4.fib_num_tclassid_users--;
+#endif
+ fib_nh_common_release(&fib_nh->nh_common);
+ free_nh_exceptions(fib_nh);
+ rt_fibinfo_free_cpus(fib_nh->nh_pcpu_rth_output);
+ rt_fibinfo_free(&fib_nh->nh_rth_input);
+}
+
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
change_nexthops(fi) {
- if (nexthop_nh->nh_dev)
- dev_put(nexthop_nh->nh_dev);
- lwtstate_put(nexthop_nh->nh_lwtstate);
- free_nh_exceptions(nexthop_nh);
- rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
- rt_fibinfo_free(&nexthop_nh->nh_rth_input);
+ fib_nh_release(fi->fib_net, nexthop_nh);
} endfor_nexthops(fi);
ip_fib_metrics_put(fi->fib_metrics);
@@ -230,12 +248,7 @@ void free_fib_info(struct fib_info *fi)
return;
}
fib_info_cnt--;
-#ifdef CONFIG_IP_ROUTE_CLASSID
- change_nexthops(fi) {
- if (nexthop_nh->nh_tclassid)
- fi->fib_net->ipv4.fib_num_tclassid_users--;
- } endfor_nexthops(fi);
-#endif
+
call_rcu(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);
@@ -248,7 +261,7 @@ void fib_release_info(struct fib_info *fi)
if (fi->fib_prefsrc)
hlist_del(&fi->fib_lhash);
change_nexthops(fi) {
- if (!nexthop_nh->nh_dev)
+ if (!nexthop_nh->fib_nh_dev)
continue;
hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
@@ -263,18 +276,27 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
const struct fib_nh *onh = ofi->fib_nh;
for_nexthops(fi) {
- if (nh->nh_oif != onh->nh_oif ||
- nh->nh_gw != onh->nh_gw ||
- nh->nh_scope != onh->nh_scope ||
+ if (nh->fib_nh_oif != onh->fib_nh_oif ||
+ nh->fib_nh_gw_family != onh->fib_nh_gw_family ||
+ nh->fib_nh_scope != onh->fib_nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- nh->nh_weight != onh->nh_weight ||
+ nh->fib_nh_weight != onh->fib_nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
- lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
- ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
+ lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) ||
+ ((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK))
+ return -1;
+
+ if (nh->fib_nh_gw_family == AF_INET &&
+ nh->fib_nh_gw4 != onh->fib_nh_gw4)
return -1;
+
+ if (nh->fib_nh_gw_family == AF_INET6 &&
+ ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6))
+ return -1;
+
onh++;
} endfor_nexthops(fi);
return 0;
@@ -298,7 +320,7 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
val ^= (__force u32)fi->fib_prefsrc;
val ^= fi->fib_priority;
for_nexthops(fi) {
- val ^= fib_devindex_hashfn(nh->nh_oif);
+ val ^= fib_devindex_hashfn(nh->fib_nh_oif);
} endfor_nexthops(fi)
return (val ^ (val >> 7) ^ (val >> 12)) & mask;
@@ -347,9 +369,9 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
hash = fib_devindex_hashfn(dev->ifindex);
head = &fib_info_devhash[hash];
hlist_for_each_entry(nh, head, nh_hash) {
- if (nh->nh_dev == dev &&
- nh->nh_gw == gw &&
- !(nh->nh_flags & RTNH_F_DEAD)) {
+ if (nh->fib_nh_dev == dev &&
+ nh->fib_nh_gw4 == gw &&
+ !(nh->fib_nh_flags & RTNH_F_DEAD)) {
spin_unlock(&fib_info_lock);
return 0;
}
@@ -384,10 +406,10 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
/* grab encap info */
for_nexthops(fi) {
- if (nh->nh_lwtstate) {
+ if (nh->fib_nh_lws) {
/* RTA_ENCAP_TYPE */
nh_encapsize += lwtunnel_get_encap_size(
- nh->nh_lwtstate);
+ nh->fib_nh_lws);
/* RTA_ENCAP */
nh_encapsize += nla_total_size(2);
}
@@ -435,10 +457,18 @@ static int fib_detect_death(struct fib_info *fi, int order,
struct fib_info **last_resort, int *last_idx,
int dflt)
{
+ const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
struct neighbour *n;
int state = NUD_NONE;
- n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
+ if (likely(nhc->nhc_gw_family == AF_INET))
+ n = neigh_lookup(&arp_tbl, &nhc->nhc_gw.ipv4, nhc->nhc_dev);
+ else if (nhc->nhc_gw_family == AF_INET6)
+ n = neigh_lookup(ipv6_stub->nd_tbl, &nhc->nhc_gw.ipv6,
+ nhc->nhc_dev);
+ else
+ n = NULL;
+
if (n) {
state = n->nud_state;
neigh_release(n);
@@ -457,6 +487,73 @@ static int fib_detect_death(struct fib_info *fi, int order,
return 1;
}
+int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *encap,
+ u16 encap_type, void *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack)
+{
+ if (encap) {
+ struct lwtunnel_state *lwtstate;
+ int err;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE) {
+ NL_SET_ERR_MSG(extack, "LWT encap type not specified");
+ return -EINVAL;
+ }
+ err = lwtunnel_build_state(encap_type, encap, nhc->nhc_family,
+ cfg, &lwtstate, extack);
+ if (err)
+ return err;
+
+ nhc->nhc_lwtstate = lwtstate_get(lwtstate);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fib_nh_common_init);
+
+int fib_nh_init(struct net *net, struct fib_nh *nh,
+ struct fib_config *cfg, int nh_weight,
+ struct netlink_ext_ack *extack)
+{
+ int err = -ENOMEM;
+
+ nh->fib_nh_family = AF_INET;
+
+ nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
+ if (!nh->nh_pcpu_rth_output)
+ goto err_out;
+
+ err = fib_nh_common_init(&nh->nh_common, cfg->fc_encap,
+ cfg->fc_encap_type, cfg, GFP_KERNEL, extack);
+ if (err)
+ goto init_failure;
+
+ nh->fib_nh_oif = cfg->fc_oif;
+ nh->fib_nh_gw_family = cfg->fc_gw_family;
+ if (cfg->fc_gw_family == AF_INET)
+ nh->fib_nh_gw4 = cfg->fc_gw4;
+ else if (cfg->fc_gw_family == AF_INET6)
+ nh->fib_nh_gw6 = cfg->fc_gw6;
+
+ nh->fib_nh_flags = cfg->fc_flags;
+
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ nh->nh_tclassid = cfg->fc_flow;
+ if (nh->nh_tclassid)
+ net->ipv4.fib_num_tclassid_users++;
+#endif
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ nh->fib_nh_weight = nh_weight;
+#endif
+ return 0;
+
+init_failure:
+ rt_fibinfo_free_cpus(nh->nh_pcpu_rth_output);
+ nh->nh_pcpu_rth_output = NULL;
+err_out:
+ return err;
+}
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
@@ -483,11 +580,15 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
+ struct net *net = fi->fib_net;
+ struct fib_config fib_cfg;
int ret;
change_nexthops(fi) {
int attrlen;
+ memset(&fib_cfg, 0, sizeof(fib_cfg));
+
if (!rtnh_ok(rtnh, remaining)) {
NL_SET_ERR_MSG(extack,
"Invalid nexthop configuration - extra data after nexthop");
@@ -500,56 +601,73 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
return -EINVAL;
}
- nexthop_nh->nh_flags =
- (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
- nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
- nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
+ fib_cfg.fc_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
+ fib_cfg.fc_oif = rtnh->rtnh_ifindex;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
- struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+ struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
- nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
-#ifdef CONFIG_IP_ROUTE_CLASSID
- nla = nla_find(attrs, attrlen, RTA_FLOW);
- nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
- if (nexthop_nh->nh_tclassid)
- fi->fib_net->ipv4.fib_num_tclassid_users++;
-#endif
- nla = nla_find(attrs, attrlen, RTA_ENCAP);
+ nlav = nla_find(attrs, attrlen, RTA_VIA);
+ if (nla && nlav) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop configuration can not contain both GATEWAY and VIA");
+ return -EINVAL;
+ }
if (nla) {
- struct lwtunnel_state *lwtstate;
- struct nlattr *nla_entype;
-
- nla_entype = nla_find(attrs, attrlen,
- RTA_ENCAP_TYPE);
- if (!nla_entype) {
- NL_SET_BAD_ATTR(extack, nla);
- NL_SET_ERR_MSG(extack,
- "Encap type is missing");
- goto err_inval;
- }
-
- ret = lwtunnel_build_state(nla_get_u16(
- nla_entype),
- nla, AF_INET, cfg,
- &lwtstate, extack);
+ fib_cfg.fc_gw4 = nla_get_in_addr(nla);
+ if (fib_cfg.fc_gw4)
+ fib_cfg.fc_gw_family = AF_INET;
+ } else if (nlav) {
+ ret = fib_gw_from_via(&fib_cfg, nlav, extack);
if (ret)
goto errout;
- nexthop_nh->nh_lwtstate =
- lwtstate_get(lwtstate);
}
+
+ nla = nla_find(attrs, attrlen, RTA_FLOW);
+ if (nla)
+ fib_cfg.fc_flow = nla_get_u32(nla);
+
+ fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+ if (nla)
+ fib_cfg.fc_encap_type = nla_get_u16(nla);
}
+ ret = fib_nh_init(net, nexthop_nh, &fib_cfg,
+ rtnh->rtnh_hops + 1, extack);
+ if (ret)
+ goto errout;
+
rtnh = rtnh_next(rtnh, &remaining);
} endfor_nexthops(fi);
- return 0;
-
-err_inval:
ret = -EINVAL;
-
+ if (cfg->fc_oif && fi->fib_nh->fib_nh_oif != cfg->fc_oif) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop device index does not match RTA_OIF");
+ goto errout;
+ }
+ if (cfg->fc_gw_family) {
+ if (cfg->fc_gw_family != fi->fib_nh->fib_nh_gw_family ||
+ (cfg->fc_gw_family == AF_INET &&
+ fi->fib_nh->fib_nh_gw4 != cfg->fc_gw4) ||
+ (cfg->fc_gw_family == AF_INET6 &&
+ ipv6_addr_cmp(&fi->fib_nh->fib_nh_gw6, &cfg->fc_gw6))) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop gateway does not match RTA_GATEWAY or RTA_VIA");
+ goto errout;
+ }
+ }
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop class id does not match RTA_FLOW");
+ goto errout;
+ }
+#endif
+ ret = 0;
errout:
return ret;
}
@@ -558,49 +676,51 @@ static void fib_rebalance(struct fib_info *fi)
{
int total;
int w;
- struct in_device *in_dev;
if (fi->fib_nhs < 2)
return;
total = 0;
for_nexthops(fi) {
- if (nh->nh_flags & RTNH_F_DEAD)
+ if (nh->fib_nh_flags & RTNH_F_DEAD)
continue;
- in_dev = __in_dev_get_rtnl(nh->nh_dev);
-
- if (in_dev &&
- IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- nh->nh_flags & RTNH_F_LINKDOWN)
+ if (ip_ignore_linkdown(nh->fib_nh_dev) &&
+ nh->fib_nh_flags & RTNH_F_LINKDOWN)
continue;
- total += nh->nh_weight;
+ total += nh->fib_nh_weight;
} endfor_nexthops(fi);
w = 0;
change_nexthops(fi) {
int upper_bound;
- in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);
-
- if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
+ if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD) {
upper_bound = -1;
- } else if (in_dev &&
- IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
+ } else if (ip_ignore_linkdown(nexthop_nh->fib_nh_dev) &&
+ nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
upper_bound = -1;
} else {
- w += nexthop_nh->nh_weight;
+ w += nexthop_nh->fib_nh_weight;
upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
total) - 1;
}
- atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
+ atomic_set(&nexthop_nh->fib_nh_upper_bound, upper_bound);
} endfor_nexthops(fi);
}
#else /* CONFIG_IP_ROUTE_MULTIPATH */
+static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
+ int remaining, struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG(extack, "Multipath support not enabled in kernel");
+
+ return -EINVAL;
+}
+
#define fib_rebalance(fi) do { } while (0)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -620,7 +740,7 @@ static int fib_encap_match(u16 encap_type,
ret = lwtunnel_build_state(encap_type, encap, AF_INET,
cfg, &lwtstate, extack);
if (!ret) {
- result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+ result = lwtunnel_cmp_encap(lwtstate, nh->fib_nh_lws);
lwtstate_free(lwtstate);
}
@@ -638,7 +758,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
return 1;
- if (cfg->fc_oif || cfg->fc_gw) {
+ if (cfg->fc_oif || cfg->fc_gw_family) {
if (cfg->fc_encap) {
if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
fi->fib_nh, cfg, extack))
@@ -649,10 +769,20 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
cfg->fc_flow != fi->fib_nh->nh_tclassid)
return 1;
#endif
- if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
- (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
- return 0;
- return 1;
+ if ((cfg->fc_oif && cfg->fc_oif != fi->fib_nh->fib_nh_oif) ||
+ (cfg->fc_gw_family &&
+ cfg->fc_gw_family != fi->fib_nh->fib_nh_gw_family))
+ return 1;
+
+ if (cfg->fc_gw_family == AF_INET &&
+ cfg->fc_gw4 != fi->fib_nh->fib_nh_gw4)
+ return 1;
+
+ if (cfg->fc_gw_family == AF_INET6 &&
+ ipv6_addr_cmp(&cfg->fc_gw6, &fi->fib_nh->fib_nh_gw6))
+ return 1;
+
+ return 0;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -668,16 +798,48 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
- if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
+ if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->fib_nh_oif)
return 1;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
- struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+ struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
- if (nla && nla_get_in_addr(nla) != nh->nh_gw)
- return 1;
+ nlav = nla_find(attrs, attrlen, RTA_VIA);
+ if (nla && nlav) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop configuration can not contain both GATEWAY and VIA");
+ return -EINVAL;
+ }
+
+ if (nla) {
+ if (nh->fib_nh_gw_family != AF_INET ||
+ nla_get_in_addr(nla) != nh->fib_nh_gw4)
+ return 1;
+ } else if (nlav) {
+ struct fib_config cfg2;
+ int err;
+
+ err = fib_gw_from_via(&cfg2, nlav, extack);
+ if (err)
+ return err;
+
+ switch (nh->fib_nh_gw_family) {
+ case AF_INET:
+ if (cfg2.fc_gw_family != AF_INET ||
+ cfg2.fc_gw4 != nh->fib_nh_gw4)
+ return 1;
+ break;
+ case AF_INET6:
+ if (cfg2.fc_gw_family != AF_INET6 ||
+ ipv6_addr_cmp(&cfg2.fc_gw6,
+ &nh->fib_nh_gw6))
+ return 1;
+ break;
+ }
+ }
+
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla && nla_get_u32(nla) != nh->nh_tclassid)
@@ -731,6 +893,30 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
return true;
}
+static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh,
+ u32 table, struct netlink_ext_ack *extack)
+{
+ struct fib6_config cfg = {
+ .fc_table = table,
+ .fc_flags = nh->fib_nh_flags | RTF_GATEWAY,
+ .fc_ifindex = nh->fib_nh_oif,
+ .fc_gateway = nh->fib_nh_gw6,
+ };
+ struct fib6_nh fib6_nh = {};
+ int err;
+
+ err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack);
+ if (!err) {
+ nh->fib_nh_dev = fib6_nh.fib_nh_dev;
+ dev_hold(nh->fib_nh_dev);
+ nh->fib_nh_oif = nh->fib_nh_dev->ifindex;
+ nh->fib_nh_scope = RT_SCOPE_LINK;
+
+ ipv6_stub->fib6_nh_release(&fib6_nh);
+ }
+
+ return err;
+}
/*
* Picture
@@ -775,133 +961,152 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
* |
* |-> {local prefix} (terminal node)
*/
-static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
- struct netlink_ext_ack *extack)
+static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
+ u8 scope, struct netlink_ext_ack *extack)
{
- int err = 0;
- struct net *net;
struct net_device *dev;
+ struct fib_result res;
+ int err;
- net = cfg->fc_nlinfo.nl_net;
- if (nh->nh_gw) {
- struct fib_result res;
-
- if (nh->nh_flags & RTNH_F_ONLINK) {
- unsigned int addr_type;
+ if (nh->fib_nh_flags & RTNH_F_ONLINK) {
+ unsigned int addr_type;
- if (cfg->fc_scope >= RT_SCOPE_LINK) {
- NL_SET_ERR_MSG(extack,
- "Nexthop has invalid scope");
- return -EINVAL;
- }
- dev = __dev_get_by_index(net, nh->nh_oif);
- if (!dev) {
- NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
- return -ENODEV;
- }
- if (!(dev->flags & IFF_UP)) {
- NL_SET_ERR_MSG(extack,
- "Nexthop device is not up");
- return -ENETDOWN;
- }
- addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
- if (addr_type != RTN_UNICAST) {
- NL_SET_ERR_MSG(extack,
- "Nexthop has invalid gateway");
- return -EINVAL;
- }
- if (!netif_carrier_ok(dev))
- nh->nh_flags |= RTNH_F_LINKDOWN;
- nh->nh_dev = dev;
- dev_hold(dev);
- nh->nh_scope = RT_SCOPE_LINK;
- return 0;
+ if (scope >= RT_SCOPE_LINK) {
+ NL_SET_ERR_MSG(extack, "Nexthop has invalid scope");
+ return -EINVAL;
}
- rcu_read_lock();
- {
- struct fib_table *tbl = NULL;
- struct flowi4 fl4 = {
- .daddr = nh->nh_gw,
- .flowi4_scope = cfg->fc_scope + 1,
- .flowi4_oif = nh->nh_oif,
- .flowi4_iif = LOOPBACK_IFINDEX,
- };
-
- /* It is not necessary, but requires a bit of thinking */
- if (fl4.flowi4_scope < RT_SCOPE_LINK)
- fl4.flowi4_scope = RT_SCOPE_LINK;
-
- if (cfg->fc_table)
- tbl = fib_get_table(net, cfg->fc_table);
-
- if (tbl)
- err = fib_table_lookup(tbl, &fl4, &res,
- FIB_LOOKUP_IGNORE_LINKSTATE |
- FIB_LOOKUP_NOREF);
-
- /* on error or if no table given do full lookup. This
- * is needed for example when nexthops are in the local
- * table rather than the given table
- */
- if (!tbl || err) {
- err = fib_lookup(net, &fl4, &res,
- FIB_LOOKUP_IGNORE_LINKSTATE);
- }
-
- if (err) {
- NL_SET_ERR_MSG(extack,
- "Nexthop has invalid gateway");
- rcu_read_unlock();
- return err;
- }
+ dev = __dev_get_by_index(net, nh->fib_nh_oif);
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
+ return -ENODEV;
}
- err = -EINVAL;
- if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
- NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
- goto out;
+ if (!(dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack, "Nexthop device is not up");
+ return -ENETDOWN;
}
- nh->nh_scope = res.scope;
- nh->nh_oif = FIB_RES_OIF(res);
- nh->nh_dev = dev = FIB_RES_DEV(res);
- if (!dev) {
- NL_SET_ERR_MSG(extack,
- "No egress device for nexthop gateway");
- goto out;
+ addr_type = inet_addr_type_dev_table(net, dev, nh->fib_nh_gw4);
+ if (addr_type != RTN_UNICAST) {
+ NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
+ return -EINVAL;
}
- dev_hold(dev);
if (!netif_carrier_ok(dev))
- nh->nh_flags |= RTNH_F_LINKDOWN;
- err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
- } else {
- struct in_device *in_dev;
-
- if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
- NL_SET_ERR_MSG(extack,
- "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
- return -EINVAL;
+ nh->fib_nh_flags |= RTNH_F_LINKDOWN;
+ nh->fib_nh_dev = dev;
+ dev_hold(dev);
+ nh->fib_nh_scope = RT_SCOPE_LINK;
+ return 0;
+ }
+ rcu_read_lock();
+ {
+ struct fib_table *tbl = NULL;
+ struct flowi4 fl4 = {
+ .daddr = nh->fib_nh_gw4,
+ .flowi4_scope = scope + 1,
+ .flowi4_oif = nh->fib_nh_oif,
+ .flowi4_iif = LOOPBACK_IFINDEX,
+ };
+
+ /* It is not necessary, but requires a bit of thinking */
+ if (fl4.flowi4_scope < RT_SCOPE_LINK)
+ fl4.flowi4_scope = RT_SCOPE_LINK;
+
+ if (table)
+ tbl = fib_get_table(net, table);
+
+ if (tbl)
+ err = fib_table_lookup(tbl, &fl4, &res,
+ FIB_LOOKUP_IGNORE_LINKSTATE |
+ FIB_LOOKUP_NOREF);
+
+ /* on error or if no table given do full lookup. This
+ * is needed for example when nexthops are in the local
+ * table rather than the given table
+ */
+ if (!tbl || err) {
+ err = fib_lookup(net, &fl4, &res,
+ FIB_LOOKUP_IGNORE_LINKSTATE);
}
- rcu_read_lock();
- err = -ENODEV;
- in_dev = inetdev_by_index(net, nh->nh_oif);
- if (!in_dev)
- goto out;
- err = -ENETDOWN;
- if (!(in_dev->dev->flags & IFF_UP)) {
- NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
+
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
goto out;
}
- nh->nh_dev = in_dev->dev;
- dev_hold(nh->nh_dev);
- nh->nh_scope = RT_SCOPE_HOST;
- if (!netif_carrier_ok(nh->nh_dev))
- nh->nh_flags |= RTNH_F_LINKDOWN;
- err = 0;
}
+
+ err = -EINVAL;
+ if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
+ NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
+ goto out;
+ }
+ nh->fib_nh_scope = res.scope;
+ nh->fib_nh_oif = FIB_RES_OIF(res);
+ nh->fib_nh_dev = dev = FIB_RES_DEV(res);
+ if (!dev) {
+ NL_SET_ERR_MSG(extack,
+ "No egress device for nexthop gateway");
+ goto out;
+ }
+ dev_hold(dev);
+ if (!netif_carrier_ok(dev))
+ nh->fib_nh_flags |= RTNH_F_LINKDOWN;
+ err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
+out:
+ rcu_read_unlock();
+ return err;
+}
+
+static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
+ struct netlink_ext_ack *extack)
+{
+ struct in_device *in_dev;
+ int err;
+
+ if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+
+ err = -ENODEV;
+ in_dev = inetdev_by_index(net, nh->fib_nh_oif);
+ if (!in_dev)
+ goto out;
+ err = -ENETDOWN;
+ if (!(in_dev->dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
+ goto out;
+ }
+
+ nh->fib_nh_dev = in_dev->dev;
+ dev_hold(nh->fib_nh_dev);
+ nh->fib_nh_scope = RT_SCOPE_HOST;
+ if (!netif_carrier_ok(nh->fib_nh_dev))
+ nh->fib_nh_flags |= RTNH_F_LINKDOWN;
+ err = 0;
out:
rcu_read_unlock();
return err;
}
+static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = cfg->fc_nlinfo.nl_net;
+ u32 table = cfg->fc_table;
+ int err;
+
+ if (nh->fib_nh_gw_family == AF_INET)
+ err = fib_check_nh_v4_gw(net, nh, table, cfg->fc_scope, extack);
+ else if (nh->fib_nh_gw_family == AF_INET6)
+ err = fib_check_nh_v6_gw(net, nh, table, extack);
+ else
+ err = fib_check_nh_nongw(net, nh, extack);
+
+ return err;
+}
+
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
unsigned int mask = (fib_info_hash_size - 1);
@@ -986,14 +1191,29 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
- nh->nh_saddr = inet_select_addr(nh->nh_dev,
- nh->nh_gw,
+ nh->nh_saddr = inet_select_addr(nh->fib_nh_dev,
+ nh->fib_nh_gw4,
nh->nh_parent->fib_scope);
nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
return nh->nh_saddr;
}
+__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
+{
+ struct fib_nh_common *nhc = res->nhc;
+ struct fib_nh *nh;
+
+ if (res->fi->fib_prefsrc)
+ return res->fi->fib_prefsrc;
+
+ nh = container_of(nhc, struct fib_nh, nh_common);
+ if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
+ return nh->nh_saddr;
+
+ return fib_info_update_nh_saddr(net, nh);
+}
+
static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
@@ -1096,72 +1316,18 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
fi->fib_nhs = nhs;
change_nexthops(fi) {
nexthop_nh->nh_parent = fi;
- nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
- if (!nexthop_nh->nh_pcpu_rth_output)
- goto failure;
} endfor_nexthops(fi)
- if (cfg->fc_mp) {
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ if (cfg->fc_mp)
err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack);
- if (err != 0)
- goto failure;
- if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) {
- NL_SET_ERR_MSG(extack,
- "Nexthop device index does not match RTA_OIF");
- goto err_inval;
- }
- if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) {
- NL_SET_ERR_MSG(extack,
- "Nexthop gateway does not match RTA_GATEWAY");
- goto err_inval;
- }
-#ifdef CONFIG_IP_ROUTE_CLASSID
- if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) {
- NL_SET_ERR_MSG(extack,
- "Nexthop class id does not match RTA_FLOW");
- goto err_inval;
- }
-#endif
-#else
- NL_SET_ERR_MSG(extack,
- "Multipath support not enabled in kernel");
- goto err_inval;
-#endif
- } else {
- struct fib_nh *nh = fi->fib_nh;
-
- if (cfg->fc_encap) {
- struct lwtunnel_state *lwtstate;
-
- if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE) {
- NL_SET_ERR_MSG(extack,
- "LWT encap type not specified");
- goto err_inval;
- }
- err = lwtunnel_build_state(cfg->fc_encap_type,
- cfg->fc_encap, AF_INET, cfg,
- &lwtstate, extack);
- if (err)
- goto failure;
+ else
+ err = fib_nh_init(net, fi->fib_nh, cfg, 1, extack);
- nh->nh_lwtstate = lwtstate_get(lwtstate);
- }
- nh->nh_oif = cfg->fc_oif;
- nh->nh_gw = cfg->fc_gw;
- nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_IP_ROUTE_CLASSID
- nh->nh_tclassid = cfg->fc_flow;
- if (nh->nh_tclassid)
- fi->fib_net->ipv4.fib_num_tclassid_users++;
-#endif
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- nh->nh_weight = 1;
-#endif
- }
+ if (err != 0)
+ goto failure;
if (fib_props[cfg->fc_type].error) {
- if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) {
+ if (cfg->fc_gw_family || cfg->fc_oif || cfg->fc_mp) {
NL_SET_ERR_MSG(extack,
"Gateway, device and multipath can not be specified for this route type");
goto err_inval;
@@ -1195,15 +1361,15 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
"Route with host scope can not have multiple nexthops");
goto err_inval;
}
- if (nh->nh_gw) {
+ if (nh->fib_nh_gw_family) {
NL_SET_ERR_MSG(extack,
"Route with host scope can not have a gateway");
goto err_inval;
}
- nh->nh_scope = RT_SCOPE_NOWHERE;
- nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
+ nh->fib_nh_scope = RT_SCOPE_NOWHERE;
+ nh->fib_nh_dev = dev_get_by_index(net, fi->fib_nh->fib_nh_oif);
err = -ENODEV;
- if (!nh->nh_dev)
+ if (!nh->fib_nh_dev)
goto failure;
} else {
int linkdown = 0;
@@ -1212,7 +1378,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
err = fib_check_nh(cfg, nexthop_nh, extack);
if (err != 0)
goto failure;
- if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
+ if (nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN)
linkdown++;
} endfor_nexthops(fi)
if (linkdown == fi->fib_nhs)
@@ -1226,6 +1392,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
change_nexthops(fi) {
fib_info_update_nh_saddr(net, nexthop_nh);
+ if (nexthop_nh->fib_nh_gw_family == AF_INET6)
+ fi->fib_nh_is_v6 = true;
} endfor_nexthops(fi)
fib_rebalance(fi);
@@ -1254,9 +1422,9 @@ link_it:
struct hlist_head *head;
unsigned int hash;
- if (!nexthop_nh->nh_dev)
+ if (!nexthop_nh->fib_nh_dev)
continue;
- hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
+ hash = fib_devindex_hashfn(nexthop_nh->fib_nh_dev->ifindex);
head = &fib_info_devhash[hash];
hlist_add_head(&nexthop_nh->nh_hash, head);
} endfor_nexthops(fi)
@@ -1275,6 +1443,141 @@ failure:
return ERR_PTR(err);
}
+int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
+ unsigned char *flags, bool skip_oif)
+{
+ if (nhc->nhc_flags & RTNH_F_DEAD)
+ *flags |= RTNH_F_DEAD;
+
+ if (nhc->nhc_flags & RTNH_F_LINKDOWN) {
+ *flags |= RTNH_F_LINKDOWN;
+
+ rcu_read_lock();
+ switch (nhc->nhc_family) {
+ case AF_INET:
+ if (ip_ignore_linkdown(nhc->nhc_dev))
+ *flags |= RTNH_F_DEAD;
+ break;
+ case AF_INET6:
+ if (ip6_ignore_linkdown(nhc->nhc_dev))
+ *flags |= RTNH_F_DEAD;
+ break;
+ }
+ rcu_read_unlock();
+ }
+
+ switch (nhc->nhc_gw_family) {
+ case AF_INET:
+ if (nla_put_in_addr(skb, RTA_GATEWAY, nhc->nhc_gw.ipv4))
+ goto nla_put_failure;
+ break;
+ case AF_INET6:
+ /* if gateway family does not match nexthop family
+ * gateway is encoded as RTA_VIA
+ */
+ if (nhc->nhc_gw_family != nhc->nhc_family) {
+ int alen = sizeof(struct in6_addr);
+ struct nlattr *nla;
+ struct rtvia *via;
+
+ nla = nla_reserve(skb, RTA_VIA, alen + 2);
+ if (!nla)
+ goto nla_put_failure;
+
+ via = nla_data(nla);
+ via->rtvia_family = AF_INET6;
+ memcpy(via->rtvia_addr, &nhc->nhc_gw.ipv6, alen);
+ } else if (nla_put_in6_addr(skb, RTA_GATEWAY,
+ &nhc->nhc_gw.ipv6) < 0) {
+ goto nla_put_failure;
+ }
+ break;
+ }
+
+ *flags |= (nhc->nhc_flags & RTNH_F_ONLINK);
+ if (nhc->nhc_flags & RTNH_F_OFFLOAD)
+ *flags |= RTNH_F_OFFLOAD;
+
+ if (!skip_oif && nhc->nhc_dev &&
+ nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
+ goto nla_put_failure;
+
+ if (nhc->nhc_lwtstate &&
+ lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
+ RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(fib_nexthop_info);
+
+#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
+int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
+ int nh_weight)
+{
+ const struct net_device *dev = nhc->nhc_dev;
+ struct rtnexthop *rtnh;
+ unsigned char flags = 0;
+
+ rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+ if (!rtnh)
+ goto nla_put_failure;
+
+ rtnh->rtnh_hops = nh_weight - 1;
+ rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
+
+ if (fib_nexthop_info(skb, nhc, &flags, true) < 0)
+ goto nla_put_failure;
+
+ rtnh->rtnh_flags = flags;
+
+ /* length of rtnetlink header + attributes */
+ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(fib_add_nexthop);
+#endif
+
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
+{
+ struct nlattr *mp;
+
+ mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
+ if (!mp)
+ goto nla_put_failure;
+
+ for_nexthops(fi) {
+ if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight) < 0)
+ goto nla_put_failure;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (nh->nh_tclassid &&
+ nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+ goto nla_put_failure;
+#endif
+ } endfor_nexthops(fi);
+
+ nla_nest_end(skb, mp);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
+{
+ return 0;
+}
+#endif
+
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
struct fib_info *fi, unsigned int flags)
@@ -1315,80 +1618,23 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
goto nla_put_failure;
if (fi->fib_nhs == 1) {
- if (fi->fib_nh->nh_gw &&
- nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
- goto nla_put_failure;
- if (fi->fib_nh->nh_oif &&
- nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
+ struct fib_nh *nh = &fi->fib_nh[0];
+ unsigned char flags = 0;
+
+ if (fib_nexthop_info(skb, &nh->nh_common, &flags, false) < 0)
goto nla_put_failure;
- if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
- struct in_device *in_dev;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev);
- if (in_dev &&
- IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
- rtm->rtm_flags |= RTNH_F_DEAD;
- rcu_read_unlock();
- }
- if (fi->fib_nh->nh_flags & RTNH_F_OFFLOAD)
- rtm->rtm_flags |= RTNH_F_OFFLOAD;
+
+ rtm->rtm_flags = flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (fi->fib_nh[0].nh_tclassid &&
- nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+ if (nh->nh_tclassid &&
+ nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
goto nla_put_failure;
#endif
- if (fi->fib_nh->nh_lwtstate &&
- lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
+ } else {
+ if (fib_add_multipath(skb, fi) < 0)
goto nla_put_failure;
}
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (fi->fib_nhs > 1) {
- struct rtnexthop *rtnh;
- struct nlattr *mp;
- mp = nla_nest_start(skb, RTA_MULTIPATH);
- if (!mp)
- goto nla_put_failure;
-
- for_nexthops(fi) {
- rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
- if (!rtnh)
- goto nla_put_failure;
-
- rtnh->rtnh_flags = nh->nh_flags & 0xFF;
- if (nh->nh_flags & RTNH_F_LINKDOWN) {
- struct in_device *in_dev;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(nh->nh_dev);
- if (in_dev &&
- IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
- rtnh->rtnh_flags |= RTNH_F_DEAD;
- rcu_read_unlock();
- }
- rtnh->rtnh_hops = nh->nh_weight - 1;
- rtnh->rtnh_ifindex = nh->nh_oif;
-
- if (nh->nh_gw &&
- nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
- goto nla_put_failure;
-#ifdef CONFIG_IP_ROUTE_CLASSID
- if (nh->nh_tclassid &&
- nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
- goto nla_put_failure;
-#endif
- if (nh->nh_lwtstate &&
- lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
- goto nla_put_failure;
-
- /* length of rtnetlink header + attributes */
- rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
- } endfor_nexthops(fi);
-
- nla_nest_end(skb, mp);
- }
-#endif
nlmsg_end(skb, nlh);
return 0;
@@ -1427,28 +1673,26 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
return ret;
}
-static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
+static int call_fib_nh_notifiers(struct fib_nh *nh,
enum fib_event_type event_type)
{
- struct in_device *in_dev = __in_dev_get_rtnl(fib_nh->nh_dev);
+ bool ignore_link_down = ip_ignore_linkdown(nh->fib_nh_dev);
struct fib_nh_notifier_info info = {
- .fib_nh = fib_nh,
+ .fib_nh = nh,
};
switch (event_type) {
case FIB_EVENT_NH_ADD:
- if (fib_nh->nh_flags & RTNH_F_DEAD)
+ if (nh->fib_nh_flags & RTNH_F_DEAD)
break;
- if (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->nh_flags & RTNH_F_LINKDOWN)
+ if (ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN)
break;
- return call_fib4_notifiers(dev_net(fib_nh->nh_dev), event_type,
+ return call_fib4_notifiers(dev_net(nh->fib_nh_dev), event_type,
&info.info);
case FIB_EVENT_NH_DEL:
- if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
- (fib_nh->nh_flags & RTNH_F_DEAD))
- return call_fib4_notifiers(dev_net(fib_nh->nh_dev),
+ if ((ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) ||
+ (nh->fib_nh_flags & RTNH_F_DEAD))
+ return call_fib4_notifiers(dev_net(nh->fib_nh_dev),
event_type, &info.info);
default:
break;
@@ -1502,7 +1746,7 @@ void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
struct fib_nh *nh;
hlist_for_each_entry(nh, head, nh_hash) {
- if (nh->nh_dev == dev)
+ if (nh->fib_nh_dev == dev)
nh_update_mtu(nh, dev->mtu, orig_mtu);
}
}
@@ -1530,22 +1774,22 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
int dead;
BUG_ON(!fi->fib_nhs);
- if (nh->nh_dev != dev || fi == prev_fi)
+ if (nh->fib_nh_dev != dev || fi == prev_fi)
continue;
prev_fi = fi;
dead = 0;
change_nexthops(fi) {
- if (nexthop_nh->nh_flags & RTNH_F_DEAD)
+ if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD)
dead++;
- else if (nexthop_nh->nh_dev == dev &&
- nexthop_nh->nh_scope != scope) {
+ else if (nexthop_nh->fib_nh_dev == dev &&
+ nexthop_nh->fib_nh_scope != scope) {
switch (event) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
- nexthop_nh->nh_flags |= RTNH_F_DEAD;
+ nexthop_nh->fib_nh_flags |= RTNH_F_DEAD;
/* fall through */
case NETDEV_CHANGE:
- nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
+ nexthop_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
break;
}
call_fib_nh_notifiers(nexthop_nh,
@@ -1554,7 +1798,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (event == NETDEV_UNREGISTER &&
- nexthop_nh->nh_dev == dev) {
+ nexthop_nh->fib_nh_dev == dev) {
dead = fi->fib_nhs;
break;
}
@@ -1614,8 +1858,8 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
if (next_fi->fib_scope != res->scope ||
fa->fa_type != RTN_UNICAST)
continue;
- if (!next_fi->fib_nh[0].nh_gw ||
- next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+ if (!next_fi->fib_nh[0].fib_nh_gw4 ||
+ next_fi->fib_nh[0].fib_nh_scope != RT_SCOPE_LINK)
continue;
fib_alias_accessed(fa);
@@ -1658,7 +1902,7 @@ out:
* Dead device goes up. We wake up dead nexthops.
* It takes sense only on multipath routes.
*/
-int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
+int fib_sync_up(struct net_device *dev, unsigned char nh_flags)
{
struct fib_info *prev_fi;
unsigned int hash;
@@ -1686,24 +1930,24 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
int alive;
BUG_ON(!fi->fib_nhs);
- if (nh->nh_dev != dev || fi == prev_fi)
+ if (nh->fib_nh_dev != dev || fi == prev_fi)
continue;
prev_fi = fi;
alive = 0;
change_nexthops(fi) {
- if (!(nexthop_nh->nh_flags & nh_flags)) {
+ if (!(nexthop_nh->fib_nh_flags & nh_flags)) {
alive++;
continue;
}
- if (!nexthop_nh->nh_dev ||
- !(nexthop_nh->nh_dev->flags & IFF_UP))
+ if (!nexthop_nh->fib_nh_dev ||
+ !(nexthop_nh->fib_nh_dev->flags & IFF_UP))
continue;
- if (nexthop_nh->nh_dev != dev ||
+ if (nexthop_nh->fib_nh_dev != dev ||
!__in_dev_get_rtnl(dev))
continue;
alive++;
- nexthop_nh->nh_flags &= ~nh_flags;
+ nexthop_nh->fib_nh_flags &= ~nh_flags;
call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD);
} endfor_nexthops(fi)
@@ -1723,13 +1967,19 @@ static bool fib_good_nh(const struct fib_nh *nh)
{
int state = NUD_REACHABLE;
- if (nh->nh_scope == RT_SCOPE_LINK) {
+ if (nh->fib_nh_scope == RT_SCOPE_LINK) {
struct neighbour *n;
rcu_read_lock_bh();
- n = __ipv4_neigh_lookup_noref(nh->nh_dev,
- (__force u32)nh->nh_gw);
+ if (likely(nh->fib_nh_gw_family == AF_INET))
+ n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
+ (__force u32)nh->fib_nh_gw4);
+ else if (nh->fib_nh_gw_family == AF_INET6)
+ n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev,
+ &nh->fib_nh_gw6);
+ else
+ n = NULL;
if (n)
state = n->nud_state;
@@ -1745,20 +1995,22 @@ void fib_select_multipath(struct fib_result *res, int hash)
struct net *net = fi->fib_net;
bool first = false;
- for_nexthops(fi) {
+ change_nexthops(fi) {
if (net->ipv4.sysctl_fib_multipath_use_neigh) {
- if (!fib_good_nh(nh))
+ if (!fib_good_nh(nexthop_nh))
continue;
if (!first) {
res->nh_sel = nhsel;
+ res->nhc = &nexthop_nh->nh_common;
first = true;
}
}
- if (hash > atomic_read(&nh->nh_upper_bound))
+ if (hash > atomic_read(&nexthop_nh->fib_nh_upper_bound))
continue;
res->nh_sel = nhsel;
+ res->nhc = &nexthop_nh->nh_common;
return;
} endfor_nexthops(fi);
}
@@ -1785,5 +2037,5 @@ void fib_select_path(struct net *net, struct fib_result *res,
check_saddr:
if (!fl4->saddr)
- fl4->saddr = FIB_RES_PREFSRC(net, *res);
+ fl4->saddr = fib_result_prefsrc(net, res);
}
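
The common thread in the fib_semantics.c changes above is the switch from IPv4-only fib_nh fields (nh_gw, nh_dev, nh_scope, ...) to the family-agnostic fib_nh_common view (nhc_gw_family, nhc_gw.ipv4/ipv6, nhc_dev, ...), which is what lets an IPv4 route carry an IPv6 gateway. A minimal consumer sketch using only fields exercised in this diff (the printing is illustrative, not code from the patch):

#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/socket.h>
#include <net/ip_fib.h>

static void fib_nhc_print(const struct fib_nh_common *nhc)
{
	const char *ifname = nhc->nhc_dev ? nhc->nhc_dev->name : "*";

	switch (nhc->nhc_gw_family) {
	case AF_INET:
		pr_info("via %pI4 dev %s\n", &nhc->nhc_gw.ipv4, ifname);
		break;
	case AF_INET6:
		pr_info("via %pI6c dev %s\n", &nhc->nhc_gw.ipv6, ifname);
		break;
	default:
		pr_info("dev %s (no gateway)\n", ifname);
		break;
	}
}
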
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index a573e37e0615..334f723bdf80 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -183,14 +183,16 @@ struct trie {
};
static struct key_vector *resize(struct trie *t, struct key_vector *tn);
-static size_t tnode_free_size;
+static unsigned int tnode_free_size;
/*
- * synchronize_rcu after call_rcu for that many pages; it should be especially
- * useful before resizing the root node with PREEMPT_NONE configs; the value was
- * obtained experimentally, aiming to avoid visible slowdown.
+ * synchronize_rcu after call_rcu for outstanding dirty memory; it should be
+ * especially useful before resizing the root node with PREEMPT_NONE configs;
+ * the value was obtained experimentally, aiming to avoid visible slowdown.
*/
-static const int sync_pages = 128;
+unsigned int sysctl_fib_sync_mem = 512 * 1024;
+unsigned int sysctl_fib_sync_mem_min = 64 * 1024;
+unsigned int sysctl_fib_sync_mem_max = 64 * 1024 * 1024;
static struct kmem_cache *fn_alias_kmem __ro_after_init;
static struct kmem_cache *trie_leaf_kmem __ro_after_init;
@@ -504,7 +506,7 @@ static void tnode_free(struct key_vector *tn)
tn = container_of(head, struct tnode, rcu)->kv;
}
- if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+ if (tnode_free_size >= sysctl_fib_sync_mem) {
tnode_free_size = 0;
synchronize_rcu();
}
@@ -1468,19 +1470,17 @@ found:
if (fi->fib_flags & RTNH_F_DEAD)
continue;
for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
- const struct fib_nh *nh = &fi->fib_nh[nhsel];
- struct in_device *in_dev = __in_dev_get_rcu(nh->nh_dev);
+ struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
- if (nh->nh_flags & RTNH_F_DEAD)
+ if (nhc->nhc_flags & RTNH_F_DEAD)
continue;
- if (in_dev &&
- IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- nh->nh_flags & RTNH_F_LINKDOWN &&
+ if (ip_ignore_linkdown(nhc->nhc_dev) &&
+ nhc->nhc_flags & RTNH_F_LINKDOWN &&
!(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
continue;
if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
if (flp->flowi4_oif &&
- flp->flowi4_oif != nh->nh_oif)
+ flp->flowi4_oif != nhc->nhc_oif)
continue;
}
@@ -1490,6 +1490,7 @@ found:
res->prefix = htonl(n->key);
res->prefixlen = KEYLENGTH - fa->fa_slen;
res->nh_sel = nhsel;
+ res->nhc = nhc;
res->type = fa->fa_type;
res->scope = fi->fib_scope;
res->fi = fi;
@@ -1498,7 +1499,7 @@ found:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_passed);
#endif
- trace_fib_table_lookup(tb->tb_id, flp, nh, err);
+ trace_fib_table_lookup(tb->tb_id, flp, nhc, err);
return err;
}
@@ -2651,7 +2652,7 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info
if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
flags = RTF_REJECT;
- if (fi && fi->fib_nh->nh_gw)
+ if (fi && fi->fib_nh->fib_nh_gw4)
flags |= RTF_GATEWAY;
if (mask == htonl(0xFFFFFFFF))
flags |= RTF_HOST;
@@ -2702,7 +2703,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
"%d\t%08X\t%d\t%u\t%u",
fi->fib_dev ? fi->fib_dev->name : "*",
prefix,
- fi->fib_nh->nh_gw, flags, 0, 0,
+ fi->fib_nh->fib_nh_gw4, flags, 0, 0,
fi->fib_priority,
mask,
(fi->fib_advmss ?
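
Note on the fib_trie.c hunks above: the fixed sync_pages threshold becomes the tunable sysctl_fib_sync_mem, clamped by the exported _min/_max bounds. The matching sysctl_net_ipv4.c change is not shown on this page (it is presumably the 9-line entry in the diffstat); given the exported bounds it would plausibly be registered along these lines, with the entry name and handler being assumptions:

#include <linux/sysctl.h>

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

/* Hedged sketch of a net.ipv4.fib_sync_mem entry; the real table lives
 * in net/ipv4/sysctl_net_ipv4.c and may name or bound this differently.
 */
static struct ctl_table fib_sync_mem_table[] = {
	{
		.procname	= "fib_sync_mem",
		.data		= &sysctl_fib_sync_mem,
		.maxlen		= sizeof(sysctl_fib_sync_mem),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &sysctl_fib_sync_mem_min,
		.extra2		= &sysctl_fib_sync_mem_max,
	},
	{ }
};
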
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 12ce6c526d72..ca95051317ed 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -137,7 +137,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
break;
case 1: {
- /* Direct encasulation of IPv4 or IPv6 */
+ /* Direct encapsulation of IPv4 or IPv6 */
int prot;
@@ -171,9 +171,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
/* guehdr may change after pull */
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
- hdrlen = sizeof(struct guehdr) + optlen;
-
- if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
+ if (validate_gue_flags(guehdr, optlen))
goto drop;
hdrlen = sizeof(struct guehdr) + optlen;
@@ -501,15 +499,45 @@ out_unlock:
return err;
}
-static int fou_add_to_port_list(struct net *net, struct fou *fou)
+static bool fou_cfg_cmp(struct fou *fou, struct fou_cfg *cfg)
+{
+ struct sock *sk = fou->sock->sk;
+ struct udp_port_cfg *udp_cfg = &cfg->udp_config;
+
+ if (fou->family != udp_cfg->family ||
+ fou->port != udp_cfg->local_udp_port ||
+ sk->sk_dport != udp_cfg->peer_udp_port ||
+ sk->sk_bound_dev_if != udp_cfg->bind_ifindex)
+ return false;
+
+ if (fou->family == AF_INET) {
+ if (sk->sk_rcv_saddr != udp_cfg->local_ip.s_addr ||
+ sk->sk_daddr != udp_cfg->peer_ip.s_addr)
+ return false;
+ else
+ return true;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (ipv6_addr_cmp(&sk->sk_v6_rcv_saddr, &udp_cfg->local_ip6) ||
+ ipv6_addr_cmp(&sk->sk_v6_daddr, &udp_cfg->peer_ip6))
+ return false;
+ else
+ return true;
+#endif
+ }
+
+ return false;
+}
+
+static int fou_add_to_port_list(struct net *net, struct fou *fou,
+ struct fou_cfg *cfg)
{
struct fou_net *fn = net_generic(net, fou_net_id);
struct fou *fout;
mutex_lock(&fn->fou_lock);
list_for_each_entry(fout, &fn->fou_list, list) {
- if (fou->port == fout->port &&
- fou->family == fout->family) {
+ if (fou_cfg_cmp(fout, cfg)) {
mutex_unlock(&fn->fou_lock);
return -EALREADY;
}
@@ -587,7 +615,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
sk->sk_allocation = GFP_ATOMIC;
- err = fou_add_to_port_list(net, fou);
+ err = fou_add_to_port_list(net, fou, cfg);
if (err)
goto error;
@@ -607,14 +635,12 @@ error:
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
struct fou_net *fn = net_generic(net, fou_net_id);
- __be16 port = cfg->udp_config.local_udp_port;
- u8 family = cfg->udp_config.family;
int err = -EINVAL;
struct fou *fou;
mutex_lock(&fn->fou_lock);
list_for_each_entry(fou, &fn->fou_list, list) {
- if (fou->port == port && fou->family == family) {
+ if (fou_cfg_cmp(fou, cfg)) {
fou_release(fou);
err = 0;
break;
@@ -628,16 +654,27 @@ static int fou_destroy(struct net *net, struct fou_cfg *cfg)
static struct genl_family fou_nl_family;
static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
- [FOU_ATTR_PORT] = { .type = NLA_U16, },
- [FOU_ATTR_AF] = { .type = NLA_U8, },
- [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
- [FOU_ATTR_TYPE] = { .type = NLA_U8, },
- [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
+ [FOU_ATTR_PORT] = { .type = NLA_U16, },
+ [FOU_ATTR_AF] = { .type = NLA_U8, },
+ [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
+ [FOU_ATTR_TYPE] = { .type = NLA_U8, },
+ [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
+ [FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
+ [FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
+ [FOU_ATTR_LOCAL_V6] = { .type = sizeof(struct in6_addr), },
+ [FOU_ATTR_PEER_V6] = { .type = sizeof(struct in6_addr), },
+ [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
+ [FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
};
static int parse_nl_config(struct genl_info *info,
struct fou_cfg *cfg)
{
+ bool has_local = false, has_peer = false;
+ struct nlattr *attr;
+ int ifindex;
+ __be16 port;
+
memset(cfg, 0, sizeof(*cfg));
cfg->udp_config.family = AF_INET;
@@ -659,8 +696,7 @@ static int parse_nl_config(struct genl_info *info,
}
if (info->attrs[FOU_ATTR_PORT]) {
- __be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);
-
+ port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);
cfg->udp_config.local_udp_port = port;
}
@@ -673,6 +709,52 @@ static int parse_nl_config(struct genl_info *info,
if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;
+ if (cfg->udp_config.family == AF_INET) {
+ if (info->attrs[FOU_ATTR_LOCAL_V4]) {
+ attr = info->attrs[FOU_ATTR_LOCAL_V4];
+ cfg->udp_config.local_ip.s_addr = nla_get_in_addr(attr);
+ has_local = true;
+ }
+
+ if (info->attrs[FOU_ATTR_PEER_V4]) {
+ attr = info->attrs[FOU_ATTR_PEER_V4];
+ cfg->udp_config.peer_ip.s_addr = nla_get_in_addr(attr);
+ has_peer = true;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (info->attrs[FOU_ATTR_LOCAL_V6]) {
+ attr = info->attrs[FOU_ATTR_LOCAL_V6];
+ cfg->udp_config.local_ip6 = nla_get_in6_addr(attr);
+ has_local = true;
+ }
+
+ if (info->attrs[FOU_ATTR_PEER_V6]) {
+ attr = info->attrs[FOU_ATTR_PEER_V6];
+ cfg->udp_config.peer_ip6 = nla_get_in6_addr(attr);
+ has_peer = true;
+ }
+#endif
+ }
+
+ if (has_peer) {
+ if (info->attrs[FOU_ATTR_PEER_PORT]) {
+ port = nla_get_be16(info->attrs[FOU_ATTR_PEER_PORT]);
+ cfg->udp_config.peer_udp_port = port;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (info->attrs[FOU_ATTR_IFINDEX]) {
+ if (!has_local)
+ return -EINVAL;
+
+ ifindex = nla_get_s32(info->attrs[FOU_ATTR_IFINDEX]);
+
+ cfg->udp_config.bind_ifindex = ifindex;
+ }
+
return 0;
}
@@ -704,15 +786,37 @@ static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
+ struct sock *sk = fou->sock->sk;
+
if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
+ nla_put_be16(msg, FOU_ATTR_PEER_PORT, sk->sk_dport) ||
nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
- nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
+ nla_put_u8(msg, FOU_ATTR_TYPE, fou->type) ||
+ nla_put_s32(msg, FOU_ATTR_IFINDEX, sk->sk_bound_dev_if))
return -1;
if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
return -1;
+
+ if (fou->sock->sk->sk_family == AF_INET) {
+ if (nla_put_in_addr(msg, FOU_ATTR_LOCAL_V4, sk->sk_rcv_saddr))
+ return -1;
+
+ if (nla_put_in_addr(msg, FOU_ATTR_PEER_V4, sk->sk_daddr))
+ return -1;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put_in6_addr(msg, FOU_ATTR_LOCAL_V6,
+ &sk->sk_v6_rcv_saddr))
+ return -1;
+
+ if (nla_put_in6_addr(msg, FOU_ATTR_PEER_V6, &sk->sk_v6_daddr))
+ return -1;
+#endif
+ }
+
return 0;
}
@@ -765,7 +869,7 @@ static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
ret = -ESRCH;
mutex_lock(&fn->fou_lock);
list_for_each_entry(fout, &fn->fou_list, list) {
- if (port == fout->port && family == fout->family) {
+ if (fou_cfg_cmp(fout, &cfg)) {
ret = fou_dump_info(fout, info->snd_portid,
info->snd_seq, 0, msg,
info->genlhdr->cmd);
@@ -809,21 +913,21 @@ static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
static const struct genl_ops fou_nl_ops[] = {
{
.cmd = FOU_CMD_ADD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = fou_nl_cmd_add_port,
- .policy = fou_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = FOU_CMD_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = fou_nl_cmd_rm_port,
- .policy = fou_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = FOU_CMD_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = fou_nl_cmd_get_port,
.dumpit = fou_nl_dump,
- .policy = fou_nl_policy,
},
};
@@ -832,6 +936,7 @@ static struct genl_family fou_nl_family __ro_after_init = {
.name = FOU_GENL_NAME,
.version = FOU_GENL_VERSION,
.maxattr = FOU_ATTR_MAX,
+ .policy = fou_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = fou_nl_ops,
@@ -1035,7 +1140,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
case 0: /* Full GUE header present */
break;
case 1: {
- /* Direct encasulation of IPv4 or IPv6 */
+ /* Direct encapsulation of IPv4 or IPv6 */
skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
switch (((struct iphdr *)guehdr)->version) {
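A FOU socket is no longer identified by local port and family alone: fou_cfg_cmp() also matches the local/peer addresses, peer port and bound ifindex, so several sockets can share a port as long as the full tuple differs, and add/del/get all use the same match. A standalone IPv4-only model of that comparison (hypothetical struct; the kernel version additionally handles IPv6 as shown above):

    #include <stdbool.h>
    #include <stdint.h>

    struct fou_tuple {
        uint16_t family;
        uint32_t local_addr, peer_addr;   /* network byte order in the kernel */
        uint16_t local_port, peer_port;
        int      ifindex;
    };

    static bool fou_tuple_match(const struct fou_tuple *a, const struct fou_tuple *b)
    {
        return a->family     == b->family     &&
               a->local_port == b->local_port &&
               a->peer_port  == b->peer_port  &&
               a->ifindex    == b->ifindex    &&
               a->local_addr == b->local_addr &&
               a->peer_addr  == b->peer_addr;
    }

Creating a second tunnel on the same port therefore only returns -EALREADY when every element of the tuple matches an existing socket.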
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6ea523d71947..a175e3e7ae97 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -564,7 +564,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
- if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
goto route_err;
rcu_read_unlock();
return &rt->dst;
@@ -602,7 +602,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
- if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
goto route_err;
return &rt->dst;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 00ec819f949b..06f6f280b9ff 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -123,7 +123,7 @@ int ip_forward(struct sk_buff *skb)
rt = skb_rtable(skb);
- if (opt->is_strictroute && rt->rt_uses_gateway)
+ if (opt->is_strictroute && rt->rt_gw_family)
goto sr_failed;
IPCB(skb)->flags |= IPSKB_FORWARDED;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e8bb2e85c5a4..ac880beda8a7 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -188,7 +188,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
- u32 nexthop;
+ bool is_v6gw = false;
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
@@ -218,16 +218,13 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
}
rcu_read_lock_bh();
- nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
- neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
- if (unlikely(!neigh))
- neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+ neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
if (!IS_ERR(neigh)) {
int res;
sock_confirm_neigh(skb, neigh);
- res = neigh_output(neigh, skb);
-
+ /* if crossing protocols, can not use the cached header */
+ res = neigh_output(neigh, skb, is_v6gw);
rcu_read_unlock_bh();
return res;
}
@@ -472,7 +469,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb_dst_set_noref(skb, &rt->dst);
packet_routed:
- if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
+ if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gw_family)
goto no_route;
/* OK, we know where to send it, allocate and build IP header. */
@@ -694,11 +691,8 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
return 0;
}
- while (frag) {
- skb = frag->next;
- kfree_skb(frag);
- frag = skb;
- }
+ kfree_skb_list(frag);
+
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
return err;
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index c3f3d28d1087..30c1c264bdfc 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -239,8 +239,8 @@ static int ip_tun_build_state(struct nlattr *attr,
struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
int err;
- err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy,
- extack);
+ err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
+ ip_tun_policy, extack);
if (err < 0)
return err;
@@ -356,8 +356,8 @@ static int ip6_tun_build_state(struct nlattr *attr,
struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
int err;
- err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy,
- extack);
+ err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
+ ip6_tun_policy, extack);
if (err < 0)
return err;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 35d8346742e2..254a42e83ff9 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -50,7 +50,7 @@ static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);
static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type)
+ int encap_type, bool update_skb_dev)
{
struct ip_tunnel *tunnel;
const struct iphdr *iph = ip_hdr(skb);
@@ -65,6 +65,9 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+ if (update_skb_dev)
+ skb->dev = tunnel->dev;
+
return xfrm_input(skb, nexthdr, spi, encap_type);
}
@@ -74,47 +77,28 @@ drop:
return 0;
}
-static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type)
+static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
{
- struct ip_tunnel *tunnel;
- const struct iphdr *iph = ip_hdr(skb);
- struct net *net = dev_net(skb->dev);
- struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
-
- tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
- iph->saddr, iph->daddr, 0);
- if (tunnel) {
- if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
- goto drop;
-
- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-
- skb->dev = tunnel->dev;
-
- return xfrm_input(skb, nexthdr, spi, encap_type);
- }
-
- return -EINVAL;
-drop:
- kfree_skb(skb);
- return 0;
+ return vti_input(skb, nexthdr, spi, encap_type, false);
}
-static int vti_rcv(struct sk_buff *skb)
+static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
{
XFRM_SPI_SKB_CB(skb)->family = AF_INET;
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
- return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
+ return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
}
-static int vti_rcv_ipip(struct sk_buff *skb)
+static int vti_rcv_proto(struct sk_buff *skb)
{
- XFRM_SPI_SKB_CB(skb)->family = AF_INET;
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+ return vti_rcv(skb, 0, false);
+}
- return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+static int vti_rcv_tunnel(struct sk_buff *skb)
+{
+ return vti_rcv(skb, ip_hdr(skb)->saddr, true);
}
static int vti_rcv_cb(struct sk_buff *skb, int err)
@@ -123,7 +107,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
struct net_device *dev;
struct pcpu_sw_netstats *tstats;
struct xfrm_state *x;
- struct xfrm_mode *inner_mode;
+ const struct xfrm_mode *inner_mode;
struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
u32 orig_mark = skb->mark;
int ret;
@@ -142,7 +126,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
x = xfrm_input_state(skb);
- inner_mode = x->inner_mode;
+ inner_mode = &x->inner_mode;
if (x->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
@@ -153,7 +137,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
}
}
- family = inner_mode->afinfo->family;
+ family = inner_mode->family;
skb->mark = be32_to_cpu(tunnel->parms.i_key);
ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
@@ -447,31 +431,31 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
}
static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
- .handler = vti_rcv,
- .input_handler = vti_input,
+ .handler = vti_rcv_proto,
+ .input_handler = vti_input_proto,
.cb_handler = vti_rcv_cb,
.err_handler = vti4_err,
.priority = 100,
};
static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
- .handler = vti_rcv,
- .input_handler = vti_input,
+ .handler = vti_rcv_proto,
+ .input_handler = vti_input_proto,
.cb_handler = vti_rcv_cb,
.err_handler = vti4_err,
.priority = 100,
};
static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
- .handler = vti_rcv,
- .input_handler = vti_input,
+ .handler = vti_rcv_proto,
+ .input_handler = vti_input_proto,
.cb_handler = vti_rcv_cb,
.err_handler = vti4_err,
.priority = 100,
};
static struct xfrm_tunnel ipip_handler __read_mostly = {
- .handler = vti_rcv_ipip,
+ .handler = vti_rcv_tunnel,
.err_handler = vti4_err,
.priority = 0,
};
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 2c931120c494..2c61e10a60e3 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -66,7 +66,7 @@
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
-#include <net/nexthop.h>
+#include <net/rtnh.h>
#include <linux/nospec.h>
@@ -373,7 +373,6 @@ static const struct rhashtable_params ipmr_rht_params = {
.key_offset = offsetof(struct mfc_cache, cmparg),
.key_len = sizeof(struct mfc_cache_cmp_arg),
.nelem_hint = 3,
- .locks_mul = 1,
.obj_cmpfn = ipmr_hash_cmp,
.automatic_shrinking = true,
};
@@ -2499,8 +2498,8 @@ static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
}
if (!netlink_strict_get_check(skb))
- return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_ipv4_policy, extack);
+ return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
rtm = nlmsg_data(nlh);
if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
@@ -2511,8 +2510,8 @@ static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_ipv4_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
if (err)
return err;
@@ -2675,8 +2674,8 @@ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
struct rtmsg *rtm;
int ret, rem;
- ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
- extack);
+ ret = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
+ rtm_ipmr_policy, extack);
if (ret < 0)
goto out;
rtm = nlmsg_data(nlh);
@@ -2784,7 +2783,7 @@ static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
return true;
vif = &mrt->vif_table[vifid];
- vif_nest = nla_nest_start(skb, IPMRA_VIF);
+ vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
if (!vif_nest)
return false;
if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
@@ -2868,7 +2867,7 @@ static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
memset(hdr, 0, sizeof(*hdr));
hdr->ifi_family = RTNL_FAMILY_IPMR;
- af = nla_nest_start(skb, IFLA_AF_SPEC);
+ af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
if (!af) {
nlmsg_cancel(skb, nlh);
goto out;
@@ -2879,7 +2878,7 @@ static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
goto out;
}
- vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
+ vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS);
if (!vifs) {
nla_nest_end(skb, af);
nlmsg_end(skb, nlh);
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 3e614cc824f7..278834d4babc 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -228,7 +228,7 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
if (c->mfc_flags & MFC_OFFLOAD)
rtm->rtm_flags |= RTNH_F_OFFLOAD;
- mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
+ mp_attr = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp_attr)
return -EMSGSIZE;
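The nla_nest_start() -> nla_nest_start_noflag() renames here and in ipmr.c keep the wire format unchanged: the _noflag variant behaves like the old nla_nest_start(), while nla_nest_start() itself is assumed to now set NLA_F_NESTED on the container attribute. A one-line sketch of that assumed relationship (paraphrasing, not quoting, include/net/netlink.h):

    /* assumed semantics after the rename */
    static inline struct nlattr *nest_start_sketch(struct sk_buff *skb, int attrtype)
    {
        return nla_nest_start_noflag(skb, attrtype | NLA_F_NESTED);
    }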
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index c98391d49200..1412b029f37f 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -27,14 +27,6 @@ config NF_TABLES_IPV4
if NF_TABLES_IPV4
-config NFT_CHAIN_ROUTE_IPV4
- tristate "IPv4 nf_tables route chain support"
- help
- This option enables the "route" chain for IPv4 in nf_tables. This
- chain type is used to force packet re-routing after mangling header
- fields such as the source, destination, type of service and
- the packet mark.
-
config NFT_REJECT_IPV4
select NF_REJECT_IPV4
default NFT_REJECT
@@ -232,16 +224,10 @@ if IP_NF_NAT
config IP_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
- select NF_NAT_MASQUERADE
- default m if NETFILTER_ADVANCED=n
+ select NETFILTER_XT_TARGET_MASQUERADE
help
- Masquerading is a special case of NAT: all outgoing connections are
- changed to seem to come from a particular interface's address, and
- if the interface goes down, those connections are lost. This is
- only useful for dialup accounts with dynamic IP address (ie. your IP
- address will be different on next dialup).
-
- To compile it as a module, choose M here. If unsure, say N.
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects NETFILTER_XT_TARGET_MASQUERADE.
config IP_NF_TARGET_NETMAP
tristate "NETMAP target support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index e241f5188ebe..c50e0ec095d2 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -24,7 +24,6 @@ nf_nat_snmp_basic-y := nf_nat_snmp_basic.asn1.o nf_nat_snmp_basic_main.o
$(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic.asn1.h
obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
-obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
@@ -49,7 +48,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
-obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
deleted file mode 100644
index fd3f9e8a74da..000000000000
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Masquerade. Simple mapping which alters range to a local IP address
- (depending on route). */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/types.h>
-#include <linux/inetdevice.h>
-#include <linux/ip.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <net/protocol.h>
-#include <net/ip.h>
-#include <net/checksum.h>
-#include <net/route.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/ipv4/nf_nat_masquerade.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
-
-/* FIXME: Multiple targets. --RR */
-static int masquerade_tg_check(const struct xt_tgchk_param *par)
-{
- const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
- if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
- pr_debug("bad MAP_IPS.\n");
- return -EINVAL;
- }
- if (mr->rangesize != 1) {
- pr_debug("bad rangesize %u\n", mr->rangesize);
- return -EINVAL;
- }
- return nf_ct_netns_get(par->net, par->family);
-}
-
-static unsigned int
-masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
- struct nf_nat_range2 range;
- const struct nf_nat_ipv4_multi_range_compat *mr;
-
- mr = par->targinfo;
- range.flags = mr->range[0].flags;
- range.min_proto = mr->range[0].min;
- range.max_proto = mr->range[0].max;
-
- return nf_nat_masquerade_ipv4(skb, xt_hooknum(par), &range,
- xt_out(par));
-}
-
-static void masquerade_tg_destroy(const struct xt_tgdtor_param *par)
-{
- nf_ct_netns_put(par->net, par->family);
-}
-
-static struct xt_target masquerade_tg_reg __read_mostly = {
- .name = "MASQUERADE",
- .family = NFPROTO_IPV4,
- .target = masquerade_tg,
- .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
- .table = "nat",
- .hooks = 1 << NF_INET_POST_ROUTING,
- .checkentry = masquerade_tg_check,
- .destroy = masquerade_tg_destroy,
- .me = THIS_MODULE,
-};
-
-static int __init masquerade_tg_init(void)
-{
- int ret;
-
- ret = xt_register_target(&masquerade_tg_reg);
- if (ret)
- return ret;
-
- ret = nf_nat_masquerade_ipv4_register_notifier();
- if (ret)
- xt_unregister_target(&masquerade_tg_reg);
-
- return ret;
-}
-
-static void __exit masquerade_tg_exit(void)
-{
- xt_unregister_target(&masquerade_tg_reg);
- nf_nat_masquerade_ipv4_unregister_notifier();
-}
-
-module_init(masquerade_tg_init);
-module_exit(masquerade_tg_exit);
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
deleted file mode 100644
index 7d82934c46f4..000000000000
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
- * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables_ipv4.h>
-#include <net/route.h>
-#include <net/ip.h>
-
-static unsigned int nf_route_table_hook(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- unsigned int ret;
- struct nft_pktinfo pkt;
- u32 mark;
- __be32 saddr, daddr;
- u_int8_t tos;
- const struct iphdr *iph;
- int err;
-
- nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_ipv4(&pkt, skb);
-
- mark = skb->mark;
- iph = ip_hdr(skb);
- saddr = iph->saddr;
- daddr = iph->daddr;
- tos = iph->tos;
-
- ret = nft_do_chain(&pkt, priv);
- if (ret != NF_DROP && ret != NF_STOLEN) {
- iph = ip_hdr(skb);
-
- if (iph->saddr != saddr ||
- iph->daddr != daddr ||
- skb->mark != mark ||
- iph->tos != tos) {
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
- }
- return ret;
-}
-
-static const struct nft_chain_type nft_chain_route_ipv4 = {
- .name = "route",
- .type = NFT_CHAIN_T_ROUTE,
- .family = NFPROTO_IPV4,
- .owner = THIS_MODULE,
- .hook_mask = (1 << NF_INET_LOCAL_OUT),
- .hooks = {
- [NF_INET_LOCAL_OUT] = nf_route_table_hook,
- },
-};
-
-static int __init nft_chain_route_init(void)
-{
- nft_register_chain_type(&nft_chain_route_ipv4);
-
- return 0;
-}
-
-static void __exit nft_chain_route_exit(void)
-{
- nft_unregister_chain_type(&nft_chain_route_ipv4);
-}
-
-module_init(nft_chain_route_init);
-module_exit(nft_chain_route_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_CHAIN(AF_INET, "route");
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6fdf1c195d8e..795aed6e4720 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -434,37 +434,46 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
+ const struct rtable *rt = container_of(dst, struct rtable, dst);
struct net_device *dev = dst->dev;
- const __be32 *pkey = daddr;
- const struct rtable *rt;
struct neighbour *n;
- rt = (const struct rtable *) dst;
- if (rt->rt_gateway)
- pkey = (const __be32 *) &rt->rt_gateway;
- else if (skb)
- pkey = &ip_hdr(skb)->daddr;
+ rcu_read_lock_bh();
+
+ if (likely(rt->rt_gw_family == AF_INET)) {
+ n = ip_neigh_gw4(dev, rt->rt_gw4);
+ } else if (rt->rt_gw_family == AF_INET6) {
+ n = ip_neigh_gw6(dev, &rt->rt_gw6);
+ } else {
+ __be32 pkey;
+
+ pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
+ n = ip_neigh_gw4(dev, pkey);
+ }
+
+ if (n && !refcount_inc_not_zero(&n->refcnt))
+ n = NULL;
- n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
- if (n)
- return n;
- return neigh_create(&arp_tbl, pkey, dev);
+ rcu_read_unlock_bh();
+
+ return n;
}
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
+ const struct rtable *rt = container_of(dst, struct rtable, dst);
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
- const struct rtable *rt;
- rt = (const struct rtable *)dst;
- if (rt->rt_gateway)
- pkey = (const __be32 *)&rt->rt_gateway;
- else if (!daddr ||
+ if (rt->rt_gw_family == AF_INET) {
+ pkey = (const __be32 *)&rt->rt_gw4;
+ } else if (rt->rt_gw_family == AF_INET6) {
+ return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
+ } else if (!daddr ||
(rt->rt_flags &
- (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
+ (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
return;
-
+ }
__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
@@ -500,15 +509,17 @@ EXPORT_SYMBOL(ip_idents_reserve);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
- static u32 ip_idents_hashrnd __read_mostly;
u32 hash, id;
- net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
+ /* Note the following code is not safe, but this is okay. */
+ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
+ get_random_bytes(&net->ipv4.ip_id_key,
+ sizeof(net->ipv4.ip_id_key));
- hash = jhash_3words((__force u32)iph->daddr,
+ hash = siphash_3u32((__force u32)iph->daddr,
(__force u32)iph->saddr,
- iph->protocol ^ net_hash_mix(net),
- ip_idents_hashrnd);
+ iph->protocol,
+ &net->ipv4.ip_id_key);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
}
@@ -627,8 +638,8 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
if (fnhe->fnhe_gw) {
rt->rt_flags |= RTCF_REDIRECTED;
- rt->rt_gateway = fnhe->fnhe_gw;
- rt->rt_uses_gateway = 1;
+ rt->rt_gw_family = AF_INET;
+ rt->rt_gw4 = fnhe->fnhe_gw;
}
}
@@ -642,7 +653,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
unsigned int i;
int depth;
- genid = fnhe_genid(dev_net(nh->nh_dev));
+ genid = fnhe_genid(dev_net(nh->fib_nh_dev));
hval = fnhe_hashfun(daddr);
spin_lock_bh(&fnhe_lock);
@@ -745,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
return;
}
- if (rt->rt_gateway != old_gw)
+ if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
return;
in_dev = __in_dev_get_rcu(dev);
@@ -776,8 +787,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
neigh_event_send(n, NULL);
} else {
if (fib_lookup(net, fl4, &res, 0) == 0) {
- struct fib_nh *nh = &FIB_RES_NH(res);
+ struct fib_nh_common *nhc = FIB_RES_NHC(res);
+ struct fib_nh *nh;
+ nh = container_of(nhc, struct fib_nh, nh_common);
update_or_create_fnhe(nh, fl4->daddr, new_gw,
0, false,
jiffies + ip_rt_gc_timeout);
@@ -1025,8 +1038,10 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
rcu_read_lock();
if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
- struct fib_nh *nh = &FIB_RES_NH(res);
+ struct fib_nh_common *nhc = FIB_RES_NHC(res);
+ struct fib_nh *nh;
+ nh = container_of(nhc, struct fib_nh, nh_common);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
jiffies + ip_rt_mtu_expires);
}
@@ -1176,7 +1191,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
*
* When a PMTU/redirect information update invalidates a route,
* this is indicated by setting obsolete to DST_OBSOLETE_KILL or
- * DST_OBSOLETE_DEAD by dst_free().
+ * DST_OBSOLETE_DEAD.
*/
if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
return NULL;
@@ -1261,7 +1276,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
- src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
+ src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
else
src = inet_select_addr(rt->dst.dev,
rt_nexthop(rt, iph->daddr),
@@ -1304,7 +1319,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = READ_ONCE(dst->dev->mtu);
if (unlikely(ip_mtu_locked(dst))) {
- if (rt->rt_uses_gateway && mtu > 576)
+ if (rt->rt_gw_family && mtu > 576)
mtu = 576;
}
@@ -1380,9 +1395,9 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
+ struct fib_nh_common *nhc = res->nhc;
+ struct net_device *dev = nhc->nhc_dev;
struct fib_info *fi = res->fi;
- struct fib_nh *nh = &fi->fib_nh[res->nh_sel];
- struct net_device *dev = nh->nh_dev;
u32 mtu = 0;
if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
@@ -1390,6 +1405,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
mtu = fi->fib_mtu;
if (likely(!mtu)) {
+ struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
struct fib_nh_exception *fnhe;
fnhe = find_exception(nh, daddr);
@@ -1400,7 +1416,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
if (likely(!mtu))
mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
- return mtu - lwtunnel_headroom(nh->nh_lwtstate, mtu);
+ return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
@@ -1431,8 +1447,10 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
orig = NULL;
}
fill_route_from_fnhe(rt, fnhe);
- if (!rt->rt_gateway)
- rt->rt_gateway = daddr;
+ if (!rt->rt_gw4) {
+ rt->rt_gw4 = daddr;
+ rt->rt_gw_family = AF_INET;
+ }
if (do_cache) {
dst_hold(&rt->dst);
@@ -1555,18 +1573,25 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
bool cached = false;
if (fi) {
- struct fib_nh *nh = &FIB_RES_NH(*res);
-
- if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
- rt->rt_gateway = nh->nh_gw;
- rt->rt_uses_gateway = 1;
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+ struct fib_nh *nh;
+
+ if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
+ rt->rt_gw_family = nhc->nhc_gw_family;
+ /* only INET and INET6 are supported */
+ if (likely(nhc->nhc_gw_family == AF_INET))
+ rt->rt_gw4 = nhc->nhc_gw.ipv4;
+ else
+ rt->rt_gw6 = nhc->nhc_gw.ipv6;
}
+
ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
+ nh = container_of(nhc, struct fib_nh, nh_common);
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
- rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
+ rt->dst.lwtstate = lwtstate_get(nh->fib_nh_lws);
if (unlikely(fnhe))
cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
else if (do_cache)
@@ -1577,8 +1602,10 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
* However, if we are unsuccessful at storing this
* route into the cache we really need to set it.
*/
- if (!rt->rt_gateway)
- rt->rt_gateway = daddr;
+ if (!rt->rt_gw4) {
+ rt->rt_gw_family = AF_INET;
+ rt->rt_gw4 = daddr;
+ }
rt_add_uncached_list(rt);
}
} else
@@ -1611,8 +1638,8 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
rt->rt_iif = 0;
rt->rt_pmtu = 0;
rt->rt_mtu_locked = 0;
- rt->rt_gateway = 0;
- rt->rt_uses_gateway = 0;
+ rt->rt_gw_family = 0;
+ rt->rt_gw4 = 0;
INIT_LIST_HEAD(&rt->rt_uncached);
rt->dst.output = ip_output;
@@ -1725,15 +1752,18 @@ static int __mkroute_input(struct sk_buff *skb,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+ struct net_device *dev = nhc->nhc_dev;
struct fib_nh_exception *fnhe;
struct rtable *rth;
+ struct fib_nh *nh;
int err;
struct in_device *out_dev;
bool do_cache;
u32 itag = 0;
/* get a working reference to the output device */
- out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
+ out_dev = __in_dev_get_rcu(dev);
if (!out_dev) {
net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
@@ -1750,10 +1780,14 @@ static int __mkroute_input(struct sk_buff *skb,
do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
- skb->protocol == htons(ETH_P_IP) &&
- (IN_DEV_SHARED_MEDIA(out_dev) ||
- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
- IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+ skb->protocol == htons(ETH_P_IP)) {
+ __be32 gw;
+
+ gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
+ if (IN_DEV_SHARED_MEDIA(out_dev) ||
+ inet_addr_onlink(out_dev, saddr, gw))
+ IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+ }
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
@@ -1770,12 +1804,13 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
- fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ nh = container_of(nhc, struct fib_nh, nh_common);
+ fnhe = find_exception(nh, daddr);
if (do_cache) {
if (fnhe)
rth = rcu_dereference(fnhe->fnhe_rth_input);
else
- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ rth = rcu_dereference(nh->nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
@@ -2069,7 +2104,11 @@ local_input:
do_cache = false;
if (res->fi) {
if (!itag) {
- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+ struct fib_nh *nh;
+
+ nh = container_of(nhc, struct fib_nh, nh_common);
+ rth = rcu_dereference(nh->nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
err = 0;
@@ -2099,15 +2138,17 @@ local_input:
}
if (do_cache) {
- struct fib_nh *nh = &FIB_RES_NH(*res);
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+ struct fib_nh *nh;
- rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
+ rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
WARN_ON(rth->dst.input == lwtunnel_input);
rth->dst.lwtstate->orig_input = rth->dst.input;
rth->dst.input = lwtunnel_input;
}
+ nh = container_of(nhc, struct fib_nh, nh_common);
if (unlikely(!rt_cache_route(nh, rth)))
rt_add_uncached_list(rth);
}
@@ -2279,8 +2320,9 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
fnhe = NULL;
do_cache &= fi != NULL;
if (fi) {
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+ struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
struct rtable __rcu **prth;
- struct fib_nh *nh = &FIB_RES_NH(*res);
fnhe = find_exception(nh, fl4->daddr);
if (!do_cache)
@@ -2290,8 +2332,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
} else {
if (unlikely(fl4->flowi4_flags &
FLOWI_FLAG_KNOWN_NH &&
- !(nh->nh_gw &&
- nh->nh_scope == RT_SCOPE_LINK))) {
+ !(nhc->nhc_gw_family &&
+ nhc->nhc_scope == RT_SCOPE_LINK))) {
do_cache = false;
goto add;
}
@@ -2600,8 +2642,11 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
- rt->rt_gateway = ort->rt_gateway;
- rt->rt_uses_gateway = ort->rt_uses_gateway;
+ rt->rt_gw_family = ort->rt_gw_family;
+ if (rt->rt_gw_family == AF_INET)
+ rt->rt_gw4 = ort->rt_gw4;
+ else if (rt->rt_gw_family == AF_INET6)
+ rt->rt_gw6 = ort->rt_gw6;
INIT_LIST_HEAD(&rt->rt_uncached);
}
@@ -2680,9 +2725,22 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
goto nla_put_failure;
}
- if (rt->rt_uses_gateway &&
- nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
+ if (rt->rt_gw_family == AF_INET &&
+ nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
goto nla_put_failure;
+ } else if (rt->rt_gw_family == AF_INET6) {
+ int alen = sizeof(struct in6_addr);
+ struct nlattr *nla;
+ struct rtvia *via;
+
+ nla = nla_reserve(skb, RTA_VIA, alen + 2);
+ if (!nla)
+ goto nla_put_failure;
+
+ via = nla_data(nla);
+ via->rtvia_family = AF_INET6;
+ memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
+ }
expires = rt->dst.expires;
if (expires) {
@@ -2819,8 +2877,8 @@ static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
}
if (!netlink_strict_get_check(skb))
- return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_ipv4_policy, extack);
+ return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
rtm = nlmsg_data(nlh);
if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
@@ -2838,8 +2896,8 @@ static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_ipv4_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
if (err)
return err;
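__ip_select_ident() drops the single global jhash seed in favour of a per-netns siphash key that is seeded lazily on first use; as the added comment notes, the unlocked zero-check is racy but harmless, since concurrent seeding merely overwrites one random key with another. A standalone model of the lazy-seed pattern, with a stub standing in for siphash_3u32():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct netns_model { uint64_t id_key[2]; };

    static bool key_is_zero(const struct netns_model *ns)
    {
        return !(ns->id_key[0] | ns->id_key[1]);
    }

    static uint32_t ip_ident_hash_model(struct netns_model *ns, uint32_t daddr,
                                        uint32_t saddr, uint8_t proto)
    {
        if (key_is_zero(ns)) {          /* racy on purpose, exactly as in the hunk */
            ns->id_key[0] = ((uint64_t)rand() << 32) | (uint32_t)rand();
            ns->id_key[1] = ((uint64_t)rand() << 32) | (uint32_t)rand();
        }
        /* stand-in for siphash_3u32(daddr, saddr, proto, &key) */
        return (uint32_t)((daddr ^ ns->id_key[0]) * 2654435761u ^ saddr ^ proto ^ ns->id_key[1]);
    }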
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index e531344611a0..008545f63667 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -216,16 +216,15 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
refcount_set(&req->rsk_refcnt, 1);
tcp_sk(child)->tsoffset = tsoff;
sock_rps_save_rxhash(child, skb);
- if (!inet_csk_reqsk_queue_add(sk, req, child)) {
- bh_unlock_sock(child);
- sock_put(child);
- child = NULL;
- reqsk_put(req);
- }
- } else {
- reqsk_free(req);
+ if (inet_csk_reqsk_queue_add(sk, req, child))
+ return child;
+
+ bh_unlock_sock(child);
+ sock_put(child);
}
- return child;
+ __reqsk_free(req);
+
+ return NULL;
}
EXPORT_SYMBOL(tcp_get_cookie_sock);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index eeb4041fa5f9..875867b64d6a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -550,6 +550,15 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
+ {
+ .procname = "fib_sync_mem",
+ .data = &sysctl_fib_sync_mem,
+ .maxlen = sizeof(sysctl_fib_sync_mem),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = &sysctl_fib_sync_mem_min,
+ .extra2 = &sysctl_fib_sync_mem_max,
+ },
{ }
};
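With proc_douintvec_minmax and the min/max globals defined in fib_trie.c, writes outside [64 KiB, 64 MiB] are rejected; the knob should surface as /proc/sys/net/ipv4/fib_sync_mem (path assumed from the table this entry is added to). A trivial user-space read of the default:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/ipv4/fib_sync_mem", "r"); /* assumed path */
        unsigned int val = 0;

        if (!f) {
            perror("fib_sync_mem");
            return 1;
        }
        if (fscanf(f, "%u", &val) == 1)
            printf("fib_sync_mem = %u bytes\n", val);  /* default would be 524288 */
        fclose(f);
        return 0;
    }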
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6baa6dc1b13b..1fa15beb8380 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -457,18 +457,6 @@ void tcp_init_sock(struct sock *sk)
}
EXPORT_SYMBOL(tcp_init_sock);
-void tcp_init_transfer(struct sock *sk, int bpf_op)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- tcp_mtup_init(sk);
- icsk->icsk_af_ops->rebuild_header(sk);
- tcp_init_metrics(sk);
- tcp_call_bpf(sk, bpf_op, 0, NULL);
- tcp_init_congestion_control(sk);
- tcp_init_buffer_space(sk);
-}
-
static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
struct sk_buff *skb = tcp_write_queue_tail(sk);
@@ -865,6 +853,18 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
{
struct sk_buff *skb;
+ if (likely(!size)) {
+ skb = sk->sk_tx_skb_cache;
+ if (skb && !skb_cloned(skb)) {
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+ sk->sk_tx_skb_cache = NULL;
+ pskb_trim(skb, 0);
+ INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
+ skb_shinfo(skb)->tx_flags = 0;
+ memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
+ return skb;
+ }
+ }
/* The TCP header must be at least 32-bit aligned. */
size = ALIGN(size, 4);
@@ -1098,30 +1098,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
}
EXPORT_SYMBOL(tcp_sendpage);
-/* Do not bother using a page frag for very small frames.
- * But use this heuristic only for the first skb in write queue.
- *
- * Having no payload in skb->head allows better SACK shifting
- * in tcp_shift_skb_data(), reducing sack/rack overhead, because
- * write queue has less skbs.
- * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
- * This also speeds up tso_fragment(), since it wont fallback
- * to tcp_fragment().
- */
-static int linear_payload_sz(bool first_skb)
-{
- if (first_skb)
- return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
- return 0;
-}
-
-static int select_size(bool first_skb, bool zc)
-{
- if (zc)
- return 0;
- return linear_payload_sz(first_skb);
-}
-
void tcp_free_fastopen_req(struct tcp_sock *tp)
{
if (tp->fastopen_req) {
@@ -1272,7 +1248,6 @@ restart:
if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
bool first_skb;
- int linear;
new_segment:
if (!sk_stream_memory_free(sk))
@@ -1283,8 +1258,7 @@ new_segment:
goto restart;
}
first_skb = tcp_rtx_and_write_queues_empty(sk);
- linear = select_size(first_skb, zc);
- skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
+ skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
first_skb);
if (!skb)
goto wait_for_memory;
@@ -2552,6 +2526,11 @@ void tcp_write_queue_purge(struct sock *sk)
sk_wmem_free_skb(sk, skb);
}
tcp_rtx_queue_purge(sk);
+ skb = sk->sk_tx_skb_cache;
+ if (skb) {
+ __kfree_skb(skb);
+ sk->sk_tx_skb_cache = NULL;
+ }
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
@@ -2587,6 +2566,10 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
+ if (sk->sk_rx_skb_cache) {
+ __kfree_skb(sk->sk_rx_skb_cache);
+ sk->sk_rx_skb_cache = NULL;
+ }
tp->copied_seq = tp->rcv_nxt;
tp->urg_data = 0;
tcp_write_queue_purge(sk);
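The removed select_size()/linear_payload_sz() heuristic gives way to a one-skb transmit cache: sk_stream_alloc_skb() first tries to recondition sk->sk_tx_skb_cache instead of allocating, and tcp_write_queue_purge()/tcp_disconnect() must free the cached tx/rx skbs so nothing leaks across a disconnect. A standalone model of the recycle-or-allocate decision (hypothetical types, mirroring the checks in the hunk above):

    #include <stddef.h>
    #include <stdlib.h>

    struct skb_model  { int cloned; };
    struct sock_model { struct skb_model *tx_skb_cache; };

    /* reuse the cached skb only for an empty (size == 0) request and only
     * when the buffer is not shared; otherwise fall back to allocation
     */
    static struct skb_model *stream_alloc_model(struct sock_model *sk, size_t size)
    {
        struct skb_model *skb = sk->tx_skb_cache;

        if (size == 0 && skb && !skb->cloned) {
            sk->tx_skb_cache = NULL;      /* ownership moves back to the caller */
            return skb;                   /* caller re-initializes it, as above */
        }
        return calloc(1, sizeof(*skb));
    }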
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 731d3045b50a..077d9abdfcf5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2252,7 +2252,7 @@ static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
*/
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
- return !tp->retrans_stamp ||
+ return tp->retrans_stamp &&
tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
}
@@ -3521,7 +3521,7 @@ static void tcp_xmit_recovery(struct sock *sk, int rexmit)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (rexmit == REXMIT_NONE)
+ if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
return;
if (unlikely(rexmit == 2)) {
@@ -5647,6 +5647,32 @@ discard:
}
EXPORT_SYMBOL(tcp_rcv_established);
+void tcp_init_transfer(struct sock *sk, int bpf_op)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tcp_mtup_init(sk);
+ icsk->icsk_af_ops->rebuild_header(sk);
+ tcp_init_metrics(sk);
+
+ /* Initialize the congestion window to start the transfer.
+ * Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+ * retransmitted. In light of RFC6298 more aggressive 1sec
+ * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+ * retransmission has occurred.
+ */
+ if (tp->total_retrans > 1 && tp->undo_marker)
+ tp->snd_cwnd = 1;
+ else
+ tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+
+ tcp_call_bpf(sk, bpf_op, 0, NULL);
+ tcp_init_congestion_control(sk);
+ tcp_init_buffer_space(sk);
+}
+
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5748,6 +5774,21 @@ static void smc_check_reset_syn(struct tcp_sock *tp)
#endif
}
+static void tcp_try_undo_spurious_syn(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 syn_stamp;
+
+ /* undo_marker is set when SYN or SYNACK times out. The timeout is
+ * spurious if the ACK's timestamp option echo value matches the
+ * original SYN timestamp.
+ */
+ syn_stamp = tp->retrans_stamp;
+ if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp &&
+ syn_stamp == tp->rx_opt.rcv_tsecr)
+ tp->undo_marker = 0;
+}
+
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th)
{
@@ -5815,6 +5856,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_ecn_rcv_synack(tp, th);
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+ tcp_try_undo_spurious_syn(sk);
tcp_ack(sk, skb, FLAG_SLOWPATH);
/* Ok.. it's good. Set up sequence numbers and
@@ -5973,6 +6015,27 @@ reset_and_undo:
return 1;
}
+static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
+{
+ tcp_try_undo_loss(sk, false);
+ inet_csk(sk)->icsk_retransmits = 0;
+
+ /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+ * we no longer need req so release it.
+ */
+ reqsk_fastopen_remove(sk, tcp_sk(sk)->fastopen_rsk, false);
+
+ /* Re-arm the timer because data may have been sent out.
+ * This is similar to the regular data transmission case
+ * when new data has just been ack'ed.
+ *
+ * (TFO) - we could try to be more aggressive and
+ * retransmitting any data sooner based on when they
+ * are sent out.
+ */
+ tcp_rearm_rto(sk);
+}
+
/*
* This function implements the receiving procedure of RFC 793 for
* all states except ESTABLISHED and TIME_WAIT.
@@ -6069,22 +6132,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (!tp->srtt_us)
tcp_synack_rtt_meas(sk, req);
- /* Once we leave TCP_SYN_RECV, we no longer need req
- * so release it.
- */
if (req) {
- inet_csk(sk)->icsk_retransmits = 0;
- reqsk_fastopen_remove(sk, req, false);
- /* Re-arm the timer because data may have been sent out.
- * This is similar to the regular data transmission case
- * when new data has just been ack'ed.
- *
- * (TFO) - we could try to be more aggressive and
- * retransmitting any data sooner based on when they
- * are sent out.
- */
- tcp_rearm_rto(sk);
+ tcp_rcv_synrecv_state_fastopen(sk);
} else {
+ tcp_try_undo_spurious_syn(sk);
+ tp->retrans_stamp = 0;
tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
tp->copied_seq = tp->rcv_nxt;
}
@@ -6119,16 +6171,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_FIN_WAIT1: {
int tmo;
- /* If we enter the TCP_FIN_WAIT1 state and we are a
- * Fast Open socket and this is the first acceptable
- * ACK we have received, this would have acknowledged
- * our SYNACK so stop the SYNACK timer.
- */
- if (req) {
- /* We no longer need the request sock. */
- reqsk_fastopen_remove(sk, req, false);
- tcp_rearm_rto(sk);
- }
+ if (req)
+ tcp_rcv_synrecv_state_fastopen(sk);
+
if (tp->snd_una != tp->write_seq)
break;
@@ -6263,6 +6308,11 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
* congestion control: Linux DCTCP asserts ECT on all packets,
* including SYN, which is most optimal solution; however,
* others, such as FreeBSD do not.
+ *
+ * Exception: At least one of the reserved bits of the TCP header (th->res1) is
+ * set, indicating the use of a future TCP extension (such as AccECN). See
+ * RFC8311 §4.3 which updates RFC3168 to allow the development of such

+ * extensions.
*/
static void tcp_ecn_create_request(struct request_sock *req,
const struct sk_buff *skb,
@@ -6282,7 +6332,7 @@ static void tcp_ecn_create_request(struct request_sock *req,
ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst;
- if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
+ if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
(ecn_ok_dst & DST_FEATURE_ECN_CA) ||
tcp_bpf_ca_needs_ecn((struct sock *)req))
inet_rsk(req)->ecn_ok = 1;
@@ -6298,7 +6348,7 @@ static void tcp_openreq_init(struct request_sock *req,
req->cookie_ts = 0;
tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- tcp_rsk(req)->snt_synack = tcp_clock_us();
+ tcp_rsk(req)->snt_synack = 0;
tcp_rsk(req)->last_oow_ack_time = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
@@ -6502,8 +6552,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
reqsk_fastopen_remove(fastopen_sk, req, false);
bh_unlock_sock(fastopen_sk);
sock_put(fastopen_sk);
- reqsk_put(req);
- goto drop;
+ goto drop_and_free;
}
sk->sk_data_ready(sk);
bh_unlock_sock(fastopen_sk);
@@ -6527,7 +6576,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
drop_and_release:
dst_release(dst);
drop_and_free:
- reqsk_free(req);
+ __reqsk_free(req);
drop:
tcp_listendrop(sk);
return 0;
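tcp_init_transfer() now owns the "cut cwnd to 1 after a retransmitted SYN/SYN-ACK" rule previously applied in tcp_init_metrics(), and only enforces it while undo_marker is still set, i.e. when tcp_try_undo_spurious_syn() could not prove via the timestamp echo that the retransmission was spurious. A standalone model of that spurious-SYN test (invented values, same comparison as the new helper):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* the timeout was spurious if the peer echoes the timestamp of the
     * original SYN rather than that of the retransmission
     */
    static bool syn_retrans_was_spurious(uint32_t orig_syn_tsval, uint32_t echoed_tsecr)
    {
        return orig_syn_tsval && echoed_tsecr == orig_syn_tsval;
    }

    int main(void)
    {
        printf("%d\n", syn_retrans_was_spurious(1000, 1000)); /* 1: keep the full initial cwnd */
        printf("%d\n", syn_retrans_was_spurious(1000, 2000)); /* 0: cwnd falls back to 1       */
        return 0;
    }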
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a2896944aa37..af81e4a6a8d8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1785,6 +1785,7 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
int tcp_v4_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
+ struct sk_buff *skb_to_free;
int sdif = inet_sdif(skb);
const struct iphdr *iph;
const struct tcphdr *th;
@@ -1916,11 +1917,17 @@ process:
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
if (!sock_owned_by_user(sk)) {
+ skb_to_free = sk->sk_rx_skb_cache;
+ sk->sk_rx_skb_cache = NULL;
ret = tcp_v4_do_rcv(sk, skb);
- } else if (tcp_add_backlog(sk, skb)) {
- goto discard_and_relse;
+ } else {
+ if (tcp_add_backlog(sk, skb))
+ goto discard_and_relse;
+ skb_to_free = NULL;
}
bh_unlock_sock(sk);
+ if (skb_to_free)
+ __kfree_skb(skb_to_free);
put_and_return:
if (refcounted)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index b467a7cabf40..c4848e7a0aad 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -512,16 +512,6 @@ reset:
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
}
- /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
- * retransmitted. In light of RFC6298 more aggressive 1sec
- * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
- * retransmission has occurred.
- */
- if (tp->total_retrans > 1)
- tp->snd_cwnd = 1;
- else
- tp->snd_cwnd = tcp_init_cwnd(tp, dst);
- tp->snd_cwnd_stamp = tcp_jiffies32;
}
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
@@ -658,7 +648,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
{
int n = 0;
- nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
+ nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
if (!nest)
goto nla_put_failure;
for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
@@ -951,14 +941,14 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
static const struct genl_ops tcp_metrics_nl_ops[] = {
{
.cmd = TCP_METRICS_CMD_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tcp_metrics_nl_cmd_get,
.dumpit = tcp_metrics_nl_dump,
- .policy = tcp_metrics_nl_policy,
},
{
.cmd = TCP_METRICS_CMD_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tcp_metrics_nl_cmd_del,
- .policy = tcp_metrics_nl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -968,6 +958,7 @@ static struct genl_family tcp_metrics_nl_family __ro_after_init = {
.name = TCP_METRICS_GENL_NAME,
.version = TCP_METRICS_GENL_VERSION,
.maxattr = TCP_METRICS_ATTR_MAX,
+ .policy = tcp_metrics_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = tcp_metrics_nl_ops,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 79900f783e0d..9c2a0d36fb20 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -522,6 +522,11 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
+ if (req->num_timeout) {
+ newtp->undo_marker = treq->snt_isn;
+ newtp->retrans_stamp = div_u64(treq->snt_synack,
+ USEC_PER_SEC / TCP_TS_HZ);
+ }
newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
newtp->md5sig_info = NULL; /*XXX*/
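A passively opened child whose SYNACK was retransmitted now inherits undo_marker and a retrans_stamp derived from the first SYNACK timestamp. snt_synack is kept in microseconds, so with TCP_TS_HZ assumed to be 1000 (the millisecond TCP timestamp clock) the divisor USEC_PER_SEC / TCP_TS_HZ is simply 1000. A trivial check of the conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000ULL
    #define TCP_TS_HZ    1000ULL    /* assumed value, see above */

    int main(void)
    {
        uint64_t snt_synack_us = 1234567890ULL;                      /* microseconds     */
        uint64_t stamp = snt_synack_us / (USEC_PER_SEC / TCP_TS_HZ);
        printf("%llu\n", (unsigned long long)stamp);                 /* 1234567 ms ticks */
        return 0;
    }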
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4522579aaca2..0c4ed66dc1bf 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -52,12 +52,8 @@ void tcp_mstamp_refresh(struct tcp_sock *tp)
{
u64 val = tcp_clock_ns();
- if (val > tp->tcp_clock_cache)
- tp->tcp_clock_cache = val;
-
- val = div_u64(val, NSEC_PER_USEC);
- if (val > tp->tcp_mstamp)
- tp->tcp_mstamp = val;
+ tp->tcp_clock_cache = val;
+ tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
}
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
@@ -3092,7 +3088,6 @@ void tcp_send_fin(struct sock *sk)
tskb = skb_rb_last(&sk->tcp_rtx_queue);
if (tskb) {
-coalesce:
TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
TCP_SKB_CB(tskb)->end_seq++;
tp->write_seq++;
@@ -3108,11 +3103,9 @@ coalesce:
}
} else {
skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
- if (unlikely(!skb)) {
- if (tskb)
- goto coalesce;
+ if (unlikely(!skb))
return;
- }
+
INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
skb_reserve(skb, MAX_TCP_HEADER);
sk_forced_mem_schedule(sk, skb->truesize);
@@ -3254,7 +3247,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
skb->skb_mstamp_ns = cookie_init_timestamp(req);
else
#endif
+ {
skb->skb_mstamp_ns = tcp_clock_ns();
+ if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
+ tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
+ }
#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock();
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index f0c86398e6a7..2ac23da42dd2 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -393,6 +393,9 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
tcp_write_err(sk);
return;
}
+ /* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
+ if (icsk->icsk_retransmits == 1)
+ tcp_enter_loss(sk);
/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
* returned from rtx_syn_ack() to make it more persistent like
* regular retransmit because if the child socket has been accepted
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 372fdc5381a9..3c58ba02af7d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1631,7 +1631,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
EXPORT_SYMBOL(udp_ioctl);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *peeked, int *off, int *err)
+ int noblock, int *off, int *err)
{
struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
struct sk_buff_head *queue;
@@ -1650,13 +1650,11 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
break;
error = -EAGAIN;
- *peeked = 0;
do {
spin_lock_bh(&queue->lock);
skb = __skb_try_recv_from_queue(sk, queue, flags,
udp_skb_destructor,
- peeked, off, err,
- &last);
+ off, err, &last);
if (skb) {
spin_unlock_bh(&queue->lock);
return skb;
@@ -1677,8 +1675,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
skb = __skb_try_recv_from_queue(sk, queue, flags,
udp_skb_dtor_locked,
- peeked, off, err,
- &last);
+ off, err, &last);
spin_unlock(&sk_queue->lock);
spin_unlock_bh(&queue->lock);
if (skb)
@@ -1713,8 +1710,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct sk_buff *skb;
unsigned int ulen, copied;
- int peeked, peeking, off;
- int err;
+ int off, err, peeking = flags & MSG_PEEK;
int is_udplite = IS_UDPLITE(sk);
bool checksum_valid = false;
@@ -1722,9 +1718,8 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
return ip_recv_error(sk, msg, len, addr_len);
try_again:
- peeking = flags & MSG_PEEK;
off = sk_peek_offset(sk, flags);
- skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
+ skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
if (!skb)
return err;
@@ -1762,7 +1757,7 @@ try_again:
}
if (unlikely(err)) {
- if (!peeked) {
+ if (!peeking) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
@@ -1771,7 +1766,7 @@ try_again:
return err;
}
- if (!peeked)
+ if (!peeking)
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
deleted file mode 100644
index 856d2dfdb44b..000000000000
--- a/net/ipv4/xfrm4_mode_beet.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * xfrm4_mode_beet.c - BEET mode encapsulation for IPv4.
- *
- * Copyright (c) 2006 Diego Beltrami <diego.beltrami@gmail.com>
- * Miika Komu <miika@iki.fi>
- * Herbert Xu <herbert@gondor.apana.org.au>
- * Abhinav Pathak <abhinav.pathak@hiit.fi>
- * Jeff Ahrenholz <ahrenholz@gmail.com>
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/stringify.h>
-#include <net/dst.h>
-#include <net/ip.h>
-#include <net/xfrm.h>
-
-static void xfrm4_beet_make_header(struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- iph->ihl = 5;
- iph->version = 4;
-
- iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;
- iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
-
- iph->id = XFRM_MODE_SKB_CB(skb)->id;
- iph->frag_off = XFRM_MODE_SKB_CB(skb)->frag_off;
- iph->ttl = XFRM_MODE_SKB_CB(skb)->ttl;
-}
-
-/* Add encapsulation header.
- *
- * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
- */
-static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
-{
- struct ip_beet_phdr *ph;
- struct iphdr *top_iph;
- int hdrlen, optlen;
-
- hdrlen = 0;
- optlen = XFRM_MODE_SKB_CB(skb)->optlen;
- if (unlikely(optlen))
- hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
-
- skb_set_network_header(skb, -x->props.header_len -
- hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
- if (x->sel.family != AF_INET6)
- skb->network_header += IPV4_BEET_PHMAXLEN;
- skb->mac_header = skb->network_header +
- offsetof(struct iphdr, protocol);
- skb->transport_header = skb->network_header + sizeof(*top_iph);
-
- xfrm4_beet_make_header(skb);
-
- ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);
-
- top_iph = ip_hdr(skb);
-
- if (unlikely(optlen)) {
- BUG_ON(optlen < 0);
-
- ph->padlen = 4 - (optlen & 4);
- ph->hdrlen = optlen / 8;
- ph->nexthdr = top_iph->protocol;
- if (ph->padlen)
- memset(ph + 1, IPOPT_NOP, ph->padlen);
-
- top_iph->protocol = IPPROTO_BEETPH;
- top_iph->ihl = sizeof(struct iphdr) / 4;
- }
-
- top_iph->saddr = x->props.saddr.a4;
- top_iph->daddr = x->id.daddr.a4;
-
- return 0;
-}
-
-static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
-{
- struct iphdr *iph;
- int optlen = 0;
- int err = -EINVAL;
-
- if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
- struct ip_beet_phdr *ph;
- int phlen;
-
- if (!pskb_may_pull(skb, sizeof(*ph)))
- goto out;
-
- ph = (struct ip_beet_phdr *)skb->data;
-
- phlen = sizeof(*ph) + ph->padlen;
- optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
- if (optlen < 0 || optlen & 3 || optlen > 250)
- goto out;
-
- XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;
-
- if (!pskb_may_pull(skb, phlen))
- goto out;
- __skb_pull(skb, phlen);
- }
-
- skb_push(skb, sizeof(*iph));
- skb_reset_network_header(skb);
- skb_mac_header_rebuild(skb);
-
- xfrm4_beet_make_header(skb);
-
- iph = ip_hdr(skb);
-
- iph->ihl += optlen / 4;
- iph->tot_len = htons(skb->len);
- iph->daddr = x->sel.daddr.a4;
- iph->saddr = x->sel.saddr.a4;
- iph->check = 0;
- iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
- err = 0;
-out:
- return err;
-}
-
-static struct xfrm_mode xfrm4_beet_mode = {
- .input2 = xfrm4_beet_input,
- .input = xfrm_prepare_input,
- .output2 = xfrm4_beet_output,
- .output = xfrm4_prepare_output,
- .owner = THIS_MODULE,
- .encap = XFRM_MODE_BEET,
- .flags = XFRM_MODE_FLAG_TUNNEL,
-};
-
-static int __init xfrm4_beet_init(void)
-{
- return xfrm_register_mode(&xfrm4_beet_mode, AF_INET);
-}
-
-static void __exit xfrm4_beet_exit(void)
-{
- int err;
-
- err = xfrm_unregister_mode(&xfrm4_beet_mode, AF_INET);
- BUG_ON(err);
-}
-
-module_init(xfrm4_beet_init);
-module_exit(xfrm4_beet_exit);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_BEET);
diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
deleted file mode 100644
index 1ad2c2c4e250..000000000000
--- a/net/ipv4/xfrm4_mode_transport.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * xfrm4_mode_transport.c - Transport mode encapsulation for IPv4.
- *
- * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/stringify.h>
-#include <net/dst.h>
-#include <net/ip.h>
-#include <net/xfrm.h>
-#include <net/protocol.h>
-
-/* Add encapsulation header.
- *
- * The IP header will be moved forward to make space for the encapsulation
- * header.
- */
-static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
- int ihl = iph->ihl * 4;
-
- skb_set_inner_transport_header(skb, skb_transport_offset(skb));
-
- skb_set_network_header(skb, -x->props.header_len);
- skb->mac_header = skb->network_header +
- offsetof(struct iphdr, protocol);
- skb->transport_header = skb->network_header + ihl;
- __skb_pull(skb, ihl);
- memmove(skb_network_header(skb), iph, ihl);
- return 0;
-}
-
-/* Remove encapsulation header.
- *
- * The IP header will be moved over the top of the encapsulation header.
- *
- * On entry, skb->h shall point to where the IP header should be and skb->nh
- * shall be set to where the IP header currently is. skb->data shall point
- * to the start of the payload.
- */
-static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
-{
- int ihl = skb->data - skb_transport_header(skb);
-
- if (skb->transport_header != skb->network_header) {
- memmove(skb_transport_header(skb),
- skb_network_header(skb), ihl);
- skb->network_header = skb->transport_header;
- }
- ip_hdr(skb)->tot_len = htons(skb->len + ihl);
- skb_reset_transport_header(skb);
- return 0;
-}
-
-static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
- struct sk_buff *skb,
- netdev_features_t features)
-{
- const struct net_offload *ops;
- struct sk_buff *segs = ERR_PTR(-EINVAL);
- struct xfrm_offload *xo = xfrm_offload(skb);
-
- skb->transport_header += x->props.header_len;
- ops = rcu_dereference(inet_offloads[xo->proto]);
- if (likely(ops && ops->callbacks.gso_segment))
- segs = ops->callbacks.gso_segment(skb, features);
-
- return segs;
-}
-
-static void xfrm4_transport_xmit(struct xfrm_state *x, struct sk_buff *skb)
-{
- struct xfrm_offload *xo = xfrm_offload(skb);
-
- skb_reset_mac_len(skb);
- pskb_pull(skb, skb->mac_len + sizeof(struct iphdr) + x->props.header_len);
-
- if (xo->flags & XFRM_GSO_SEGMENT) {
- skb_reset_transport_header(skb);
- skb->transport_header -= x->props.header_len;
- }
-}
-
-static struct xfrm_mode xfrm4_transport_mode = {
- .input = xfrm4_transport_input,
- .output = xfrm4_transport_output,
- .gso_segment = xfrm4_transport_gso_segment,
- .xmit = xfrm4_transport_xmit,
- .owner = THIS_MODULE,
- .encap = XFRM_MODE_TRANSPORT,
-};
-
-static int __init xfrm4_transport_init(void)
-{
- return xfrm_register_mode(&xfrm4_transport_mode, AF_INET);
-}
-
-static void __exit xfrm4_transport_exit(void)
-{
- int err;
-
- err = xfrm_unregister_mode(&xfrm4_transport_mode, AF_INET);
- BUG_ON(err);
-}
-
-module_init(xfrm4_transport_init);
-module_exit(xfrm4_transport_exit);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_TRANSPORT);
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
deleted file mode 100644
index 2a9764bd1719..000000000000
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * xfrm4_mode_tunnel.c - Tunnel mode encapsulation for IPv4.
- *
- * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/stringify.h>
-#include <net/dst.h>
-#include <net/inet_ecn.h>
-#include <net/ip.h>
-#include <net/xfrm.h>
-
-static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
-{
- struct iphdr *inner_iph = ipip_hdr(skb);
-
- if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
- IP_ECN_set_ce(inner_iph);
-}
-
-/* Add encapsulation header.
- *
- * The top IP header will be constructed per RFC 2401.
- */
-static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
- struct iphdr *top_iph;
- int flags;
-
- skb_set_inner_network_header(skb, skb_network_offset(skb));
- skb_set_inner_transport_header(skb, skb_transport_offset(skb));
-
- skb_set_network_header(skb, -x->props.header_len);
- skb->mac_header = skb->network_header +
- offsetof(struct iphdr, protocol);
- skb->transport_header = skb->network_header + sizeof(*top_iph);
- top_iph = ip_hdr(skb);
-
- top_iph->ihl = 5;
- top_iph->version = 4;
-
- top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);
-
- /* DS disclosing depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
- if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
- top_iph->tos = 0;
- else
- top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
- top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
- XFRM_MODE_SKB_CB(skb)->tos);
-
- flags = x->props.flags;
- if (flags & XFRM_STATE_NOECN)
- IP_ECN_clear(top_iph);
-
- top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
- 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
-
- top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
-
- top_iph->saddr = x->props.saddr.a4;
- top_iph->daddr = x->id.daddr.a4;
- ip_select_ident(dev_net(dst->dev), skb, NULL);
-
- return 0;
-}
-
-static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
-{
- int err = -EINVAL;
-
- if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
- goto out;
-
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- goto out;
-
- err = skb_unclone(skb, GFP_ATOMIC);
- if (err)
- goto out;
-
- if (x->props.flags & XFRM_STATE_DECAP_DSCP)
- ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
- if (!(x->props.flags & XFRM_STATE_NOECN))
- ipip_ecn_decapsulate(skb);
-
- skb_reset_network_header(skb);
- skb_mac_header_rebuild(skb);
- if (skb->mac_len)
- eth_hdr(skb)->h_proto = skb->protocol;
-
- err = 0;
-
-out:
- return err;
-}
-
-static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
- struct sk_buff *skb,
- netdev_features_t features)
-{
- __skb_push(skb, skb->mac_len);
- return skb_mac_gso_segment(skb, features);
-}
-
-static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
-{
- struct xfrm_offload *xo = xfrm_offload(skb);
-
- if (xo->flags & XFRM_GSO_SEGMENT)
- skb->transport_header = skb->network_header +
- sizeof(struct iphdr);
-
- skb_reset_mac_len(skb);
- pskb_pull(skb, skb->mac_len + x->props.header_len);
-}
-
-static struct xfrm_mode xfrm4_tunnel_mode = {
- .input2 = xfrm4_mode_tunnel_input,
- .input = xfrm_prepare_input,
- .output2 = xfrm4_mode_tunnel_output,
- .output = xfrm4_prepare_output,
- .gso_segment = xfrm4_mode_tunnel_gso_segment,
- .xmit = xfrm4_mode_tunnel_xmit,
- .owner = THIS_MODULE,
- .encap = XFRM_MODE_TUNNEL,
- .flags = XFRM_MODE_FLAG_TUNNEL,
-};
-
-static int __init xfrm4_mode_tunnel_init(void)
-{
- return xfrm_register_mode(&xfrm4_tunnel_mode, AF_INET);
-}
-
-static void __exit xfrm4_mode_tunnel_exit(void)
-{
- int err;
-
- err = xfrm_unregister_mode(&xfrm4_tunnel_mode, AF_INET);
- BUG_ON(err);
-}
-
-module_init(xfrm4_mode_tunnel_init);
-module_exit(xfrm4_mode_tunnel_exit);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_TUNNEL);
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index be980c195fc5..9bb8905088c7 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -58,21 +58,6 @@ int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
return xfrm4_extract_header(skb);
}
-int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
-{
- int err;
-
- err = xfrm_inner_extract_output(x, skb);
- if (err)
- return err;
-
- IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
- skb->protocol = htons(ETH_P_IP);
-
- return x->outer_mode->output2(x, skb);
-}
-EXPORT_SYMBOL(xfrm4_prepare_output);
-
int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
{
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
@@ -87,6 +72,8 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct xfrm_state *x = skb_dst(skb)->xfrm;
+ const struct xfrm_state_afinfo *afinfo;
+ int ret = -EAFNOSUPPORT;
#ifdef CONFIG_NETFILTER
if (!x) {
@@ -95,7 +82,15 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
#endif
- return x->outer_mode->afinfo->output_finish(sk, skb);
+ rcu_read_lock();
+ afinfo = xfrm_state_afinfo_get_rcu(x->outer_mode.family);
+ if (likely(afinfo))
+ ret = afinfo->output_finish(sk, skb);
+ else
+ kfree_skb(skb);
+ rcu_read_unlock();
+
+ return ret;
}
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 2b144b92ae46..cdef8f9a3b01 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
-#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
@@ -69,17 +68,6 @@ static int xfrm4_get_saddr(struct net *net, int oif,
return 0;
}
-static int xfrm4_get_tos(const struct flowi *fl)
-{
- return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */
-}
-
-static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
- int nfheader_len)
-{
- return 0;
-}
-
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
@@ -97,8 +85,11 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
RTCF_LOCAL);
xdst->u.rt.rt_type = rt->rt_type;
- xdst->u.rt.rt_gateway = rt->rt_gateway;
- xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
+ xdst->u.rt.rt_gw_family = rt->rt_gw_family;
+ if (rt->rt_gw_family == AF_INET)
+ xdst->u.rt.rt_gw4 = rt->rt_gw4;
+ else if (rt->rt_gw_family == AF_INET6)
+ xdst->u.rt.rt_gw6 = rt->rt_gw6;
xdst->u.rt.rt_pmtu = rt->rt_pmtu;
xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
@@ -107,120 +98,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
return 0;
}
-static void
-_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
-{
- const struct iphdr *iph = ip_hdr(skb);
- int ihl = iph->ihl;
- u8 *xprth = skb_network_header(skb) + ihl * 4;
- struct flowi4 *fl4 = &fl->u.ip4;
- int oif = 0;
-
- if (skb_dst(skb))
- oif = skb_dst(skb)->dev->ifindex;
-
- memset(fl4, 0, sizeof(struct flowi4));
- fl4->flowi4_mark = skb->mark;
- fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
-
- fl4->flowi4_proto = iph->protocol;
- fl4->daddr = reverse ? iph->saddr : iph->daddr;
- fl4->saddr = reverse ? iph->daddr : iph->saddr;
- fl4->flowi4_tos = iph->tos;
-
- if (!ip_is_fragment(iph)) {
- switch (iph->protocol) {
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE:
- case IPPROTO_TCP:
- case IPPROTO_SCTP:
- case IPPROTO_DCCP:
- if (xprth + 4 < skb->data ||
- pskb_may_pull(skb, xprth + 4 - skb->data)) {
- __be16 *ports;
-
- xprth = skb_network_header(skb) + ihl * 4;
- ports = (__be16 *)xprth;
-
- fl4->fl4_sport = ports[!!reverse];
- fl4->fl4_dport = ports[!reverse];
- }
- break;
-
- case IPPROTO_ICMP:
- if (xprth + 2 < skb->data ||
- pskb_may_pull(skb, xprth + 2 - skb->data)) {
- u8 *icmp;
-
- xprth = skb_network_header(skb) + ihl * 4;
- icmp = xprth;
-
- fl4->fl4_icmp_type = icmp[0];
- fl4->fl4_icmp_code = icmp[1];
- }
- break;
-
- case IPPROTO_ESP:
- if (xprth + 4 < skb->data ||
- pskb_may_pull(skb, xprth + 4 - skb->data)) {
- __be32 *ehdr;
-
- xprth = skb_network_header(skb) + ihl * 4;
- ehdr = (__be32 *)xprth;
-
- fl4->fl4_ipsec_spi = ehdr[0];
- }
- break;
-
- case IPPROTO_AH:
- if (xprth + 8 < skb->data ||
- pskb_may_pull(skb, xprth + 8 - skb->data)) {
- __be32 *ah_hdr;
-
- xprth = skb_network_header(skb) + ihl * 4;
- ah_hdr = (__be32 *)xprth;
-
- fl4->fl4_ipsec_spi = ah_hdr[1];
- }
- break;
-
- case IPPROTO_COMP:
- if (xprth + 4 < skb->data ||
- pskb_may_pull(skb, xprth + 4 - skb->data)) {
- __be16 *ipcomp_hdr;
-
- xprth = skb_network_header(skb) + ihl * 4;
- ipcomp_hdr = (__be16 *)xprth;
-
- fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
- }
- break;
-
- case IPPROTO_GRE:
- if (xprth + 12 < skb->data ||
- pskb_may_pull(skb, xprth + 12 - skb->data)) {
- __be16 *greflags;
- __be32 *gre_hdr;
-
- xprth = skb_network_header(skb) + ihl * 4;
- greflags = (__be16 *)xprth;
- gre_hdr = (__be32 *)xprth;
-
- if (greflags[0] & GRE_KEY) {
- if (greflags[0] & GRE_CSUM)
- gre_hdr++;
- fl4->fl4_gre_key = gre_hdr[1];
- }
- }
- break;
-
- default:
- fl4->fl4_ipsec_spi = 0;
- break;
- }
- }
-}
-
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
{
@@ -273,9 +150,6 @@ static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.dst_ops = &xfrm4_dst_ops_template,
.dst_lookup = xfrm4_dst_lookup,
.get_saddr = xfrm4_get_saddr,
- .decode_session = _decode_session4,
- .get_tos = xfrm4_get_tos,
- .init_path = xfrm4_init_path,
.fill_dst = xfrm4_fill_dst,
.blackhole_route = ipv4_blackhole_route,
};
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index 35c54865dc42..bcab48944c15 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -46,7 +46,7 @@ static inline struct xfrm4_protocol __rcu **proto_handlers(u8 protocol)
handler != NULL; \
handler = rcu_dereference(handler->next)) \
-int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
+static int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
{
int ret;
struct xfrm4_protocol *handler;
@@ -61,7 +61,6 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
return 0;
}
-EXPORT_SYMBOL(xfrm4_rcv_cb);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type)