author     David Ahern <dsahern@gmail.com>  2020-05-26 12:56:17 -0600
committer  David S. Miller <davem@davemloft.net>  2020-05-26 16:06:07 -0700
commit     af7888ad9edbd8ba7f6449d1c27ce281ad4b26fd (patch)
tree       dfbee9452e5f2673a01f531dc4332bfcbc68770f /net
parent     nexthop: Expand nexthop_is_multipath in a few places (diff)
ipv4: Refactor nhc evaluation in fib_table_lookup
FIB lookups can return an entry that references an external nexthop. While walking the nexthop struct we do not want to make multiple calls into the nexthop code, which can result in two different structs getting accessed - one call returning the number of paths and the rest of the loop seeing a different nh_grp struct. If the nexthop group shrank in between, the result is an attempt to access a fib_nh_common that does not exist for the new nh_grp struct but did for the old one.

To fix that, move the device evaluation code to a helper that can be used for the inline fib_nh path as well as external nexthops. Update the existing check for fi->nh in fib_table_lookup to call a new helper, nexthop_get_nhc_lookup, which walks the external nexthop with a single rcu dereference.

Fixes: 430a049190de ("nexthop: Add support for nexthop groups")
Signed-off-by: David Ahern <dsahern@gmail.com>
Acked-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
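The nexthop_get_nhc_lookup helper named above lives on the nexthop side and is not part of this net/-only diffstat. The following is only a rough sketch of its intended shape; the nexthop field names used here (is_group, nh_grp, num_nh, nh_entries, nh_info, fib_nhc) are not shown in this diff and are assumed from the surrounding nexthop code. The point is a single rcu_dereference() of the group, with each entry evaluated via fib_lookup_good_nhc():

/* Illustrative sketch only: walk an external nexthop once and return the
 * first usable fib_nh_common, recording which entry was selected.
 */
static inline struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
                                                            int fib_flags,
                                                            const struct flowi4 *flp,
                                                            int *nhsel)
{
        struct nh_info *nhi;

        if (nh->is_group) {
                struct nh_group *nhg = rcu_dereference(nh->nh_grp);
                int i;

                /* one dereference of nh_grp: num_nh and nh_entries[] come
                 * from the same group struct for the whole walk
                 */
                for (i = 0; i < nhg->num_nh; i++) {
                        struct nexthop *nhe = nhg->nh_entries[i].nh;

                        nhi = rcu_dereference(nhe->nh_info);
                        if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
                                *nhsel = i;
                                return &nhi->fib_nhc;
                        }
                }
        } else {
                nhi = rcu_dereference(nh->nh_info);
                if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
                        *nhsel = 0;
                        return &nhi->fib_nhc;
                }
        }

        return NULL;
}

Either way the caller ends up with an nhc and an nhsel that are consistent with one snapshot of the group, which is what the set_result/miss handling in fib_table_lookup below relies on.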
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/fib_trie.c  51
1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 4f334b425538..248f1c1959a6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1371,6 +1371,26 @@ static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
return (key ^ prefix) & (prefix | -prefix);
}
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+ const struct flowi4 *flp)
+{
+ if (nhc->nhc_flags & RTNH_F_DEAD)
+ return false;
+
+ if (ip_ignore_linkdown(nhc->nhc_dev) &&
+ nhc->nhc_flags & RTNH_F_LINKDOWN &&
+ !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
+ return false;
+
+ if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
+ if (flp->flowi4_oif &&
+ flp->flowi4_oif != nhc->nhc_oif)
+ return false;
+ }
+
+ return true;
+}
+
/* should be called with rcu_read_lock */
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags)
@@ -1503,6 +1523,7 @@ found:
/* Step 3: Process the leaf, if that fails fall back to backtracing */
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
+ struct fib_nh_common *nhc;
int nhsel, err;
if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
@@ -1528,26 +1549,25 @@ out_reject:
if (fi->fib_flags & RTNH_F_DEAD)
continue;
- if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) {
- err = fib_props[RTN_BLACKHOLE].error;
- goto out_reject;
+ if (unlikely(fi->nh)) {
+ if (nexthop_is_blackhole(fi->nh)) {
+ err = fib_props[RTN_BLACKHOLE].error;
+ goto out_reject;
+ }
+
+ nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
+ &nhsel);
+ if (nhc)
+ goto set_result;
+ goto miss;
}
for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
- struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
+ nhc = fib_info_nhc(fi, nhsel);
- if (nhc->nhc_flags & RTNH_F_DEAD)
+ if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
continue;
- if (ip_ignore_linkdown(nhc->nhc_dev) &&
- nhc->nhc_flags & RTNH_F_LINKDOWN &&
- !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
- continue;
- if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
- if (flp->flowi4_oif &&
- flp->flowi4_oif != nhc->nhc_oif)
- continue;
- }
-
+set_result:
if (!(fib_flags & FIB_LOOKUP_NOREF))
refcount_inc(&fi->fib_clntref);
@@ -1568,6 +1588,7 @@ out_reject:
return err;
}
}
+miss:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_miss);
#endif
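For contrast, the hazard the set_result/miss restructuring removes can be summarized as follows. This is a simplified sketch of the pre-patch access pattern in fib_table_lookup, not the literal old code:

/* Simplified sketch of the pre-patch hazard when fi->nh is set:
 * fib_info_num_path() and fib_info_nhc() each walk fi->nh and dereference
 * nh_grp independently, so a concurrent group replacement can leave the
 * loop bound and the indexed entries pointing at different nh_grp objects.
 */
for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {         /* dereference #1 */
        struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);      /* dereference #2 */

        /* if the group shrank between the two dereferences, nhsel can now
         * index past the end of the newer, smaller nh_grp
         */

        /* per-path device and flag checks elided */
}

With the patch applied, external nexthops never reach this loop: they are resolved in one pass by nexthop_get_nhc_lookup, and the loop only iterates the inline fib_nh array, where the path count cannot change underneath it.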