Diffstat
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h2
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c6
-rw-r--r--net/netfilter/ipset/ip_set_core.c75
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h635
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c196
-rw-r--r--net/netfilter/nf_conntrack_extend.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c20
-rw-r--r--net/netfilter/nf_conntrack_standalone.c2
-rw-r--r--net/netfilter/nf_flow_table_core.c34
-rw-r--r--net/netfilter/nf_flow_table_ip.c21
-rw-r--r--net/netfilter/nf_flow_table_offload.c165
-rw-r--r--net/netfilter/nf_synproxy_core.c2
-rw-r--r--net/netfilter/nf_tables_api.c423
-rw-r--r--net/netfilter/nf_tables_offload.c2
-rw-r--r--net/netfilter/nf_tables_set_core.c2
-rw-r--r--net/netfilter/nfnetlink.c6
-rw-r--r--net/netfilter/nfnetlink_cthelper.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c8
-rw-r--r--net/netfilter/nft_bitwise.c224
-rw-r--r--net/netfilter/nft_chain_nat.c1
-rw-r--r--net/netfilter/nft_dynset.c2
-rw-r--r--net/netfilter/nft_meta.c440
-rw-r--r--net/netfilter/nft_osf.c3
-rw-r--r--net/netfilter/nft_payload.c1
-rw-r--r--net/netfilter/nft_set_bitmap.c4
-rw-r--r--net/netfilter/nft_set_hash.c2
-rw-r--r--net/netfilter/nft_set_pipapo.c2104
-rw-r--r--net/netfilter/nft_set_rbtree.c3
-rw-r--r--net/netfilter/nft_tunnel.c54
-rw-r--r--net/netfilter/x_tables.c10
-rw-r--r--net/netfilter/xt_hashlimit.c58
-rw-r--r--net/netfilter/xt_recent.c19
37 files changed, 3822 insertions, 728 deletions
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 5e9b2eb24349..3f572e5a975e 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -81,7 +81,8 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
nft_chain_route.o nf_tables_offload.o
nf_tables_set-objs := nf_tables_set_core.o \
- nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
+ nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
+ nft_set_pipapo.o
obj-$(CONFIG_NF_TABLES) += nf_tables.o
obj-$(CONFIG_NF_TABLES_SET) += nf_tables_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 077a2cb65fcb..26ab0e9612d8 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set)
if (set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
- memset(map->members, 0, map->memsize);
+ bitmap_zero(map->members, map->elements);
set->elements = 0;
set->ext_size = 0;
}
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index abe8f77d7d23..0a2196f59106 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
/* Type structure */
struct bitmap_ip {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
u32 first_ip, u32 last_ip,
u32 elements, u32 hosts, u8 netmask)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
@@ -322,7 +322,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (!map)
return -ENOMEM;
- map->memsize = bitmap_bytes(0, elements - 1);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ip;
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index b618713297da..739e343efaf6 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -42,7 +42,7 @@ enum {
/* Type structure */
struct bitmap_ipmac {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@@ -299,7 +299,7 @@ static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip, u32 elements)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
@@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (!map)
return -ENOMEM;
- map->memsize = bitmap_bytes(0, elements - 1);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ipmac;
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map);
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 23d6095cb196..b49978dd810d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
/* Type structure */
struct bitmap_port {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u16 first_port; /* host byte order, included in range */
u16 last_port; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@@ -231,7 +231,7 @@ static bool
init_map_port(struct ip_set *set, struct bitmap_port *map,
u16 first_port, u16 last_port)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_port = first_port;
@@ -271,7 +271,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
return -ENOMEM;
map->elements = elements;
- map->memsize = bitmap_bytes(0, map->elements);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_port;
if (!init_map_port(set, map, first_port, last_port)) {
kfree(map);
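
The three bitmap hunks above all make the same conversion: members becomes a real unsigned long bitmap allocated with bitmap_zalloc(), memsize is derived via BITS_TO_LONGS(), and flushing uses bitmap_zero(). A minimal sketch of that pattern; the demo_map structure and function names are illustrative, not from the ipset code, and the bitmap_free() teardown is an assumption (it is not part of the hunks shown):

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_map {
	unsigned long *members;	/* one bit per possible element */
	u32 elements;		/* number of bits */
	size_t memsize;		/* reported memory footprint */
};

static bool demo_map_init(struct demo_map *map, u32 elements)
{
	map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
	if (!map->members)
		return false;
	map->elements = elements;
	/* Same number of bytes bitmap_zalloc() actually allocated. */
	map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
	return true;
}

static void demo_map_flush(struct demo_map *map)
{
	bitmap_zero(map->members, map->elements);	/* clear every bit */
}

static void demo_map_destroy(struct demo_map *map)
{
	bitmap_free(map->members);
}

bitmap_zalloc() sizes the allocation from the bit count itself, so memsize only repeats the BITS_TO_LONGS() arithmetic for the memory usage reported to userspace.
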
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index cf895bc80871..8dd17589217d 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -723,6 +723,20 @@ ip_set_rcu_get(struct net *net, ip_set_id_t index)
return set;
}
+static inline void
+ip_set_lock(struct ip_set *set)
+{
+ if (!set->variant->region_lock)
+ spin_lock_bh(&set->lock);
+}
+
+static inline void
+ip_set_unlock(struct ip_set *set)
+{
+ if (!set->variant->region_lock)
+ spin_unlock_bh(&set->lock);
+}
+
int
ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
@@ -744,9 +758,9 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
if (ret == -EAGAIN) {
/* Type requests element to be completed */
pr_debug("element must be completed, ADD is triggered\n");
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
set->variant->kadt(set, skb, par, IPSET_ADD, opt);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
ret = 1;
} else {
/* --return-nomatch: invert matched element */
@@ -775,9 +789,9 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
return ret;
}
@@ -797,9 +811,9 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
return ret;
}
@@ -1264,9 +1278,9 @@ ip_set_flush_set(struct ip_set *set)
{
pr_debug("set: %s\n", set->name);
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
set->variant->flush(set);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
}
static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
@@ -1483,31 +1497,34 @@ ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
};
static int
-dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
+ip_set_dump_start(struct netlink_callback *cb)
{
struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
struct nlattr *attr = (void *)nlh + min_len;
+ struct sk_buff *skb = cb->skb;
+ struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
u32 dump_type;
- ip_set_id_t index;
int ret;
ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
nlh->nlmsg_len - min_len,
ip_set_dump_policy, NULL);
if (ret)
- return ret;
+ goto error;
cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
if (cda[IPSET_ATTR_SETNAME]) {
+ ip_set_id_t index;
struct ip_set *set;
set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
&index);
- if (!set)
- return -ENOENT;
-
+ if (!set) {
+ ret = -ENOENT;
+ goto error;
+ }
dump_type = DUMP_ONE;
cb->args[IPSET_CB_INDEX] = index;
} else {
@@ -1523,10 +1540,17 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
cb->args[IPSET_CB_DUMP] = dump_type;
return 0;
+
+error:
+ /* We have to create and send the error message manually :-( */
+ if (nlh->nlmsg_flags & NLM_F_ACK) {
+ netlink_ack(cb->skb, nlh, ret, NULL);
+ }
+ return ret;
}
static int
-ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
{
ip_set_id_t index = IPSET_INVALID_ID, max;
struct ip_set *set = NULL;
@@ -1537,18 +1561,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
bool is_destroyed;
int ret = 0;
- if (!cb->args[IPSET_CB_DUMP]) {
- ret = dump_init(cb, inst);
- if (ret < 0) {
- nlh = nlmsg_hdr(cb->skb);
- /* We have to create and send the error message
- * manually :-(
- */
- if (nlh->nlmsg_flags & NLM_F_ACK)
- netlink_ack(cb->skb, nlh, ret, NULL);
- return ret;
- }
- }
+ if (!cb->args[IPSET_CB_DUMP])
+ return -EINVAL;
if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
goto out;
@@ -1684,7 +1698,8 @@ static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
{
struct netlink_dump_control c = {
- .dump = ip_set_dump_start,
+ .start = ip_set_dump_start,
+ .dump = ip_set_dump_do,
.done = ip_set_dump_done,
};
return netlink_dump_start(ctnl, skb, nlh, &c);
@@ -1712,9 +1727,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
do {
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
retried = true;
} while (ret == -EAGAIN &&
set->variant->resize &&
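
The ip_set_core.c changes above move attribute parsing out of the dump callback into a netlink .start handler (ip_set_dump_start), leaving ip_set_dump_do to rely on an already-initialized cb->args[]. A hedged skeleton of that .start/.dump/.done arrangement, using hypothetical demo_* names rather than the ipset ones:

#include <linux/netlink.h>
#include <linux/errno.h>

static int demo_dump_start(struct netlink_callback *cb)
{
	/* Parse the request once and stash state in cb->args[]; a non-zero
	 * return aborts the dump before demo_dump_do() ever runs.
	 */
	cb->args[0] = 1;
	return 0;
}

static int demo_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
{
	if (!cb->args[0])
		return -EINVAL;	/* .start must have run first */
	/* Emit messages into skb; return skb->len to be called again,
	 * zero when the dump is complete.
	 */
	return 0;
}

static int demo_dump_done(struct netlink_callback *cb)
{
	return 0;
}

static int demo_request(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.start = demo_dump_start,
		.dump  = demo_dump_do,
		.done  = demo_dump_done,
	};

	return netlink_dump_start(ctnl, skb, nlh, &c);
}

Splitting the work this way reports a parse failure before any dump output is generated; the ipset code above additionally acks the error itself when NLM_F_ACK is set, as its comment notes.
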
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 7480ce55b5c8..e52d7b7597a0 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -7,13 +7,21 @@
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
+#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>
-#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
-#define ipset_dereference_protected(p, set) \
- __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock))
-
-#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
+#define __ipset_dereference(p) \
+ rcu_dereference_protected(p, 1)
+#define ipset_dereference_nfnl(p) \
+ rcu_dereference_protected(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+#define ipset_dereference_set(p, set) \
+ rcu_dereference_protected(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+ lockdep_is_held(&(set)->lock))
+#define ipset_dereference_bh_nfnl(p) \
+ rcu_dereference_bh_check(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
/* Hashing which uses arrays to resolve clashing. The hash table is resized
* (doubled) when searching becomes too long.
@@ -72,11 +80,35 @@ struct hbucket {
__aligned(__alignof__(u64));
};
+/* Region size for locking == 2^HTABLE_REGION_BITS */
+#define HTABLE_REGION_BITS 10
+#define ahash_numof_locks(htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? 1 \
+ : jhash_size((htable_bits) - HTABLE_REGION_BITS))
+#define ahash_sizeof_regions(htable_bits) \
+ (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
+#define ahash_region(n, htable_bits) \
+ ((n) % ahash_numof_locks(htable_bits))
+#define ahash_bucket_start(h, htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? 0 \
+ : (h) * jhash_size(HTABLE_REGION_BITS))
+#define ahash_bucket_end(h, htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits) \
+ : ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
+
+struct htable_gc {
+ struct delayed_work dwork;
+ struct ip_set *set; /* Set the gc belongs to */
+ u32 region; /* Last gc run position */
+};
+
/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
atomic_t ref; /* References for resizing */
- atomic_t uref; /* References for dumping */
+ atomic_t uref; /* References for dumping and gc */
u8 htable_bits; /* size of hash table == 2^htable_bits */
+ u32 maxelem; /* Maxelem per region */
+ struct ip_set_region *hregion; /* Region locks and ext sizes */
struct hbucket __rcu *bucket[0]; /* hashtable buckets */
};
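
To make the new region arithmetic concrete, here is a small, runnable userspace mirror of the macros above (jhash_size() is assumed to expand to 1 << n, as in the kernel's <linux/jhash.h>):

#include <stdio.h>

#define HTABLE_REGION_BITS 10
#define jhash_size(n) ((unsigned int)1 << (n))
#define ahash_numof_locks(htable_bits) \
	((htable_bits) < HTABLE_REGION_BITS ? 1 \
	 : jhash_size((htable_bits) - HTABLE_REGION_BITS))
#define ahash_bucket_start(h, htable_bits) \
	((htable_bits) < HTABLE_REGION_BITS ? 0 \
	 : (h) * jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_end(h, htable_bits) \
	((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits) \
	 : ((h) + 1) * jhash_size(HTABLE_REGION_BITS))

int main(void)
{
	unsigned int bits = 12;		/* a 4096-bucket table */
	unsigned int r;

	printf("htable_bits=%u -> %u lock regions\n",
	       bits, ahash_numof_locks(bits));
	for (r = 0; r < ahash_numof_locks(bits); r++)
		printf("  region %u: buckets %u..%u\n", r,
		       ahash_bucket_start(r, bits),
		       ahash_bucket_end(r, bits) - 1);
	/* Tables below 2^HTABLE_REGION_BITS buckets keep a single region. */
	printf("htable_bits=8 -> %u lock region(s)\n", ahash_numof_locks(8));
	return 0;
}

So a 4096-bucket table is split into four independently locked regions of 1024 buckets each, and the per-region spinlocks replace the single set->lock for hash sets, which is what the new variant->region_lock flag advertises to ip_set_lock()/ip_set_unlock() in ip_set_core.c.
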
@@ -162,6 +194,10 @@ htable_bits(u32 hashsize)
#define NLEN 0
#endif /* IP_SET_HASH_WITH_NETS */
+#define SET_ELEM_EXPIRED(set, d) \
+ (SET_WITH_TIMEOUT(set) && \
+ ip_set_timeout_expired(ext_timeout(d, set)))
+
#endif /* _IP_SET_HASH_GEN_H */
#ifndef MTYPE
@@ -205,10 +241,12 @@ htable_bits(u32 hashsize)
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_uref
-#undef mtype_expire
#undef mtype_resize
+#undef mtype_ext_size
+#undef mtype_resize_ad
#undef mtype_head
#undef mtype_list
+#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_variant
@@ -247,10 +285,12 @@ htable_bits(u32 hashsize)
#define mtype_test_cidrs IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test IPSET_TOKEN(MTYPE, _test)
#define mtype_uref IPSET_TOKEN(MTYPE, _uref)
-#define mtype_expire IPSET_TOKEN(MTYPE, _expire)
#define mtype_resize IPSET_TOKEN(MTYPE, _resize)
+#define mtype_ext_size IPSET_TOKEN(MTYPE, _ext_size)
+#define mtype_resize_ad IPSET_TOKEN(MTYPE, _resize_ad)
#define mtype_head IPSET_TOKEN(MTYPE, _head)
#define mtype_list IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant IPSET_TOKEN(MTYPE, _variant)
@@ -275,8 +315,7 @@ htable_bits(u32 hashsize)
/* The generic hash structure */
struct htype {
struct htable __rcu *table; /* the hash table */
- struct timer_list gc; /* garbage collection when timeout enabled */
- struct ip_set *set; /* attached to this ip_set */
+ struct htable_gc gc; /* gc workqueue */
u32 maxelem; /* max elements in the hash */
u32 initval; /* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
@@ -288,21 +327,33 @@ struct htype {
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
#endif
+ struct list_head ad; /* Resize add|del backlist */
struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#endif
};
+/* ADD|DEL entries saved during resize */
+struct mtype_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad; /* ADD|DEL element */
+ struct mtype_elem d; /* Element value */
+ struct ip_set_ext ext; /* Extensions for ADD */
+ struct ip_set_ext mext; /* Target extensions for ADD */
+ u32 flags; /* Flags for ADD */
+};
+
#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book keeping when the hash stores different
* sized networks. cidr == real cidr + 1 to support /0.
*/
static void
-mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
+mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
int i, j;
+ spin_lock_bh(&set->lock);
/* Add in increasing prefix order, so larger cidr first */
for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
if (j != -1) {
@@ -311,7 +362,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
j = i;
} else if (h->nets[i].cidr[n] == cidr) {
h->nets[CIDR_POS(cidr)].nets[n]++;
- return;
+ goto unlock;
}
}
if (j != -1) {
@@ -320,24 +371,29 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
}
h->nets[i].cidr[n] = cidr;
h->nets[CIDR_POS(cidr)].nets[n] = 1;
+unlock:
+ spin_unlock_bh(&set->lock);
}
static void
-mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
+mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
u8 i, j, net_end = NLEN - 1;
+ spin_lock_bh(&set->lock);
for (i = 0; i < NLEN; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
h->nets[CIDR_POS(cidr)].nets[n]--;
if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
- return;
+ goto unlock;
for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
h->nets[j].cidr[n] = 0;
- return;
+ goto unlock;
}
+unlock:
+ spin_unlock_bh(&set->lock);
}
#endif
@@ -345,7 +401,7 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
{
- return sizeof(*h) + sizeof(*t);
+ return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
}
/* Get the ith element from the array block n */
@@ -369,24 +425,29 @@ mtype_flush(struct ip_set *set)
struct htype *h = set->data;
struct htable *t;
struct hbucket *n;
- u32 i;
-
- t = ipset_dereference_protected(h->table, set);
- for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(t, i), 1);
- if (!n)
- continue;
- if (set->extensions & IPSET_EXT_DESTROY)
- mtype_ext_cleanup(set, n);
- /* FIXME: use slab cache */
- rcu_assign_pointer(hbucket(t, i), NULL);
- kfree_rcu(n, rcu);
+ u32 r, i;
+
+ t = ipset_dereference_nfnl(h->table);
+ for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
+ spin_lock_bh(&t->hregion[r].lock);
+ for (i = ahash_bucket_start(r, t->htable_bits);
+ i < ahash_bucket_end(r, t->htable_bits); i++) {
+ n = __ipset_dereference(hbucket(t, i));
+ if (!n)
+ continue;
+ if (set->extensions & IPSET_EXT_DESTROY)
+ mtype_ext_cleanup(set, n);
+ /* FIXME: use slab cache */
+ rcu_assign_pointer(hbucket(t, i), NULL);
+ kfree_rcu(n, rcu);
+ }
+ t->hregion[r].ext_size = 0;
+ t->hregion[r].elements = 0;
+ spin_unlock_bh(&t->hregion[r].lock);
}
#ifdef IP_SET_HASH_WITH_NETS
memset(h->nets, 0, sizeof(h->nets));
#endif
- set->elements = 0;
- set->ext_size = 0;
}
/* Destroy the hashtable part of the set */
@@ -397,7 +458,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
u32 i;
for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(t, i), 1);
+ n = __ipset_dereference(hbucket(t, i));
if (!n)
continue;
if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
@@ -406,6 +467,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
kfree(n);
}
+ ip_set_free(t->hregion);
ip_set_free(t);
}
@@ -414,28 +476,21 @@ static void
mtype_destroy(struct ip_set *set)
{
struct htype *h = set->data;
+ struct list_head *l, *lt;
if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&h->gc);
+ cancel_delayed_work_sync(&h->gc.dwork);
- mtype_ahash_destroy(set,
- __ipset_dereference_protected(h->table, 1), true);
+ mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
+ list_for_each_safe(l, lt, &h->ad) {
+ list_del(l);
+ kfree(l);
+ }
kfree(h);
set->data = NULL;
}
-static void
-mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
-{
- struct htype *h = set->data;
-
- timer_setup(&h->gc, gc, 0);
- mod_timer(&h->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
- pr_debug("gc initialized, run in every %u\n",
- IPSET_GC_PERIOD(set->timeout));
-}
-
static bool
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
{
@@ -454,11 +509,9 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
a->extensions == b->extensions;
}
-/* Delete expired elements from the hashtable */
static void
-mtype_expire(struct ip_set *set, struct htype *h)
+mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
{
- struct htable *t;
struct hbucket *n, *tmp;
struct mtype_elem *data;
u32 i, j, d;
@@ -466,10 +519,12 @@ mtype_expire(struct ip_set *set, struct htype *h)
#ifdef IP_SET_HASH_WITH_NETS
u8 k;
#endif
+ u8 htable_bits = t->htable_bits;
- t = ipset_dereference_protected(h->table, set);
- for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(t, i), 1);
+ spin_lock_bh(&t->hregion[r].lock);
+ for (i = ahash_bucket_start(r, htable_bits);
+ i < ahash_bucket_end(r, htable_bits); i++) {
+ n = __ipset_dereference(hbucket(t, i));
if (!n)
continue;
for (j = 0, d = 0; j < n->pos; j++) {
@@ -485,58 +540,100 @@ mtype_expire(struct ip_set *set, struct htype *h)
smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
for (k = 0; k < IPSET_NET_COUNT; k++)
- mtype_del_cidr(h,
+ mtype_del_cidr(set, h,
NCIDR_PUT(DCIDR_GET(data->cidr, k)),
k);
#endif
+ t->hregion[r].elements--;
ip_set_ext_destroy(set, data);
- set->elements--;
d++;
}
if (d >= AHASH_INIT_SIZE) {
if (d >= n->size) {
+ t->hregion[r].ext_size -=
+ ext_size(n->size, dsize);
rcu_assign_pointer(hbucket(t, i), NULL);
kfree_rcu(n, rcu);
continue;
}
tmp = kzalloc(sizeof(*tmp) +
- (n->size - AHASH_INIT_SIZE) * dsize,
- GFP_ATOMIC);
+ (n->size - AHASH_INIT_SIZE) * dsize,
+ GFP_ATOMIC);
if (!tmp)
- /* Still try to delete expired elements */
+ /* Still try to delete expired elements. */
continue;
tmp->size = n->size - AHASH_INIT_SIZE;
for (j = 0, d = 0; j < n->pos; j++) {
if (!test_bit(j, n->used))
continue;
data = ahash_data(n, j, dsize);
- memcpy(tmp->value + d * dsize, data, dsize);
+ memcpy(tmp->value + d * dsize,
+ data, dsize);
set_bit(d, tmp->used);
d++;
}
tmp->pos = d;
- set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
+ t->hregion[r].ext_size -=
+ ext_size(AHASH_INIT_SIZE, dsize);
rcu_assign_pointer(hbucket(t, i), tmp);
kfree_rcu(n, rcu);
}
}
+ spin_unlock_bh(&t->hregion[r].lock);
}
static void
-mtype_gc(struct timer_list *t)
+mtype_gc(struct work_struct *work)
{
- struct htype *h = from_timer(h, t, gc);
- struct ip_set *set = h->set;
+ struct htable_gc *gc;
+ struct ip_set *set;
+ struct htype *h;
+ struct htable *t;
+ u32 r, numof_locks;
+ unsigned int next_run;
+
+ gc = container_of(work, struct htable_gc, dwork.work);
+ set = gc->set;
+ h = set->data;
- pr_debug("called\n");
spin_lock_bh(&set->lock);
- mtype_expire(set, h);
+ t = ipset_dereference_set(h->table, set);
+ atomic_inc(&t->uref);
+ numof_locks = ahash_numof_locks(t->htable_bits);
+ r = gc->region++;
+ if (r >= numof_locks) {
+ r = gc->region = 0;
+ }
+ next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
+ if (next_run < HZ/10)
+ next_run = HZ/10;
spin_unlock_bh(&set->lock);
- h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
- add_timer(&h->gc);
+ mtype_gc_do(set, h, t, r);
+
+ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+ pr_debug("Table destroy after resize by expire: %p\n", t);
+ mtype_ahash_destroy(set, t, false);
+ }
+
+ queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
+
+}
+
+static void
+mtype_gc_init(struct htable_gc *gc)
+{
+ INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
+ queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}
+static int
+mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags);
+static int
+mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags);
+
/* Resize a hash: create a new hash table with doubling the hashsize
* and inserting the elements to it. Repeat until we succeed or
* fail due to memory pressures.
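
The garbage collector above is converted from a kernel timer into a deferrable delayed work item that expires one lock region per run, so the per-run delay is the whole IPSET_GC_PERIOD divided by the number of regions (clamped to HZ/10). A minimal sketch of that scheduling pattern with illustrative demo_* names; the actual expiry walk is elided:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_gc {
	struct delayed_work dwork;
	u32 region;		/* next region to scan */
	u32 nregions;
	unsigned int period;	/* full-table period in jiffies */
};

static void demo_gc_work(struct work_struct *work)
{
	struct demo_gc *gc = container_of(work, struct demo_gc, dwork.work);
	unsigned long next_run = gc->period / gc->nregions;

	if (gc->region >= gc->nregions)
		gc->region = 0;
	/* ... expire entries of region gc->region under that region's lock ... */
	gc->region++;

	if (next_run < HZ / 10)
		next_run = HZ / 10;	/* don't reschedule too aggressively */
	queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
}

static void demo_gc_start(struct demo_gc *gc)
{
	INIT_DEFERRABLE_WORK(&gc->dwork, demo_gc_work);
	queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}

static void demo_gc_stop(struct demo_gc *gc)
{
	cancel_delayed_work_sync(&gc->dwork);	/* as mtype_destroy() does */
}

A whole pass over the table therefore still takes roughly one gc period, but each individual run only holds one region lock instead of the whole set lock.
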
@@ -547,7 +644,7 @@ mtype_resize(struct ip_set *set, bool retried)
struct htype *h = set->data;
struct htable *t, *orig;
u8 htable_bits;
- size_t extsize, dsize = set->dsize;
+ size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
u8 flags;
struct mtype_elem *tmp;
@@ -555,7 +652,9 @@ mtype_resize(struct ip_set *set, bool retried)
struct mtype_elem *data;
struct mtype_elem *d;
struct hbucket *n, *m;
- u32 i, j, key;
+ struct list_head *l, *lt;
+ struct mtype_resize_ad *x;
+ u32 i, j, r, nr, key;
int ret;
#ifdef IP_SET_HASH_WITH_NETS
@@ -563,10 +662,8 @@ mtype_resize(struct ip_set *set, bool retried)
if (!tmp)
return -ENOMEM;
#endif
- rcu_read_lock_bh();
- orig = rcu_dereference_bh_nfnl(h->table);
+ orig = ipset_dereference_bh_nfnl(h->table);
htable_bits = orig->htable_bits;
- rcu_read_unlock_bh();
retry:
ret = 0;
@@ -583,88 +680,124 @@ retry:
ret = -ENOMEM;
goto out;
}
+ t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
+ if (!t->hregion) {
+ kfree(t);
+ ret = -ENOMEM;
+ goto out;
+ }
t->htable_bits = htable_bits;
+ t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
+ for (i = 0; i < ahash_numof_locks(htable_bits); i++)
+ spin_lock_init(&t->hregion[i].lock);
- spin_lock_bh(&set->lock);
- orig = __ipset_dereference_protected(h->table, 1);
- /* There can't be another parallel resizing, but dumping is possible */
+ /* There can't be another parallel resizing,
+ * but dumping, gc, kernel side add/del are possible
+ */
+ orig = ipset_dereference_bh_nfnl(h->table);
atomic_set(&orig->ref, 1);
atomic_inc(&orig->uref);
- extsize = 0;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
- for (i = 0; i < jhash_size(orig->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(orig, i), 1);
- if (!n)
- continue;
- for (j = 0; j < n->pos; j++) {
- if (!test_bit(j, n->used))
+ for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
+ /* Expire may replace a hbucket with another one */
+ rcu_read_lock_bh();
+ for (i = ahash_bucket_start(r, orig->htable_bits);
+ i < ahash_bucket_end(r, orig->htable_bits); i++) {
+ n = __ipset_dereference(hbucket(orig, i));
+ if (!n)
continue;
- data = ahash_data(n, j, dsize);
+ for (j = 0; j < n->pos; j++) {
+ if (!test_bit(j, n->used))
+ continue;
+ data = ahash_data(n, j, dsize);
+ if (SET_ELEM_EXPIRED(set, data))
+ continue;
#ifdef IP_SET_HASH_WITH_NETS
- /* We have readers running parallel with us,
- * so the live data cannot be modified.
- */
- flags = 0;
- memcpy(tmp, data, dsize);
- data = tmp;
- mtype_data_reset_flags(data, &flags);
+ /* We have readers running parallel with us,
+ * so the live data cannot be modified.
+ */
+ flags = 0;
+ memcpy(tmp, data, dsize);
+ data = tmp;
+ mtype_data_reset_flags(data, &flags);
#endif
- key = HKEY(data, h->initval, htable_bits);
- m = __ipset_dereference_protected(hbucket(t, key), 1);
- if (!m) {
- m = kzalloc(sizeof(*m) +
+ key = HKEY(data, h->initval, htable_bits);
+ m = __ipset_dereference(hbucket(t, key));
+ nr = ahash_region(key, htable_bits);
+ if (!m) {
+ m = kzalloc(sizeof(*m) +
AHASH_INIT_SIZE * dsize,
GFP_ATOMIC);
- if (!m) {
- ret = -ENOMEM;
- goto cleanup;
- }
- m->size = AHASH_INIT_SIZE;
- extsize += ext_size(AHASH_INIT_SIZE, dsize);
- RCU_INIT_POINTER(hbucket(t, key), m);
- } else if (m->pos >= m->size) {
- struct hbucket *ht;
-
- if (m->size >= AHASH_MAX(h)) {
- ret = -EAGAIN;
- } else {
- ht = kzalloc(sizeof(*ht) +
+ if (!m) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ m->size = AHASH_INIT_SIZE;
+ t->hregion[nr].ext_size +=
+ ext_size(AHASH_INIT_SIZE,
+ dsize);
+ RCU_INIT_POINTER(hbucket(t, key), m);
+ } else if (m->pos >= m->size) {
+ struct hbucket *ht;
+
+ if (m->size >= AHASH_MAX(h)) {
+ ret = -EAGAIN;
+ } else {
+ ht = kzalloc(sizeof(*ht) +
(m->size + AHASH_INIT_SIZE)
* dsize,
GFP_ATOMIC);
- if (!ht)
- ret = -ENOMEM;
+ if (!ht)
+ ret = -ENOMEM;
+ }
+ if (ret < 0)
+ goto cleanup;
+ memcpy(ht, m, sizeof(struct hbucket) +
+ m->size * dsize);
+ ht->size = m->size + AHASH_INIT_SIZE;
+ t->hregion[nr].ext_size +=
+ ext_size(AHASH_INIT_SIZE,
+ dsize);
+ kfree(m);
+ m = ht;
+ RCU_INIT_POINTER(hbucket(t, key), ht);
}
- if (ret < 0)
- goto cleanup;
- memcpy(ht, m, sizeof(struct hbucket) +
- m->size * dsize);
- ht->size = m->size + AHASH_INIT_SIZE;
- extsize += ext_size(AHASH_INIT_SIZE, dsize);
- kfree(m);
- m = ht;
- RCU_INIT_POINTER(hbucket(t, key), ht);
- }
- d = ahash_data(m, m->pos, dsize);
- memcpy(d, data, dsize);
- set_bit(m->pos++, m->used);
+ d = ahash_data(m, m->pos, dsize);
+ memcpy(d, data, dsize);
+ set_bit(m->pos++, m->used);
+ t->hregion[nr].elements++;
#ifdef IP_SET_HASH_WITH_NETS
- mtype_data_reset_flags(d, &flags);
+ mtype_data_reset_flags(d, &flags);
#endif
+ }
}
+ rcu_read_unlock_bh();
}
- rcu_assign_pointer(h->table, t);
- set->ext_size = extsize;
- spin_unlock_bh(&set->lock);
+ /* There can't be any other writer. */
+ rcu_assign_pointer(h->table, t);
/* Give time to other readers of the set */
synchronize_rcu();
pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
orig->htable_bits, orig, t->htable_bits, t);
- /* If there's nobody else dumping the table, destroy it */
+ /* Add/delete elements processed by the SET target during resize.
+ * Kernel-side add cannot trigger a resize and userspace actions
+ * are serialized by the mutex.
+ */
+ list_for_each_safe(l, lt, &h->ad) {
+ x = list_entry(l, struct mtype_resize_ad, list);
+ if (x->ad == IPSET_ADD) {
+ mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
+ } else {
+ mtype_del(set, &x->d, NULL, NULL, 0);
+ }
+ list_del(l);
+ kfree(l);
+ }
+ /* If there's nobody else using the table, destroy it */
if (atomic_dec_and_test(&orig->uref)) {
pr_debug("Table destroy by resize %p\n", orig);
mtype_ahash_destroy(set, orig, false);
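
Because packets can still add or delete elements while the resize above is copying buckets, kernel-side operations that race with a resize are recorded on h->ad and replayed once the new table has been published. A generic record-and-replay sketch of that idea, with hypothetical demo_* names and a plain u32 standing in for the element value:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_pending {
	struct list_head list;
	bool is_add;
	u32 key;
};

/* Packet path: cannot sleep, so the operation is logged with GFP_ATOMIC. */
static void demo_record(spinlock_t *lock, struct list_head *backlog,
			bool is_add, u32 key)
{
	struct demo_pending *p = kzalloc(sizeof(*p), GFP_ATOMIC);

	if (!p)
		return;		/* best effort, as in the patch ("Don't bother") */
	p->is_add = is_add;
	p->key = key;
	spin_lock_bh(lock);
	list_add_tail(&p->list, backlog);
	spin_unlock_bh(lock);
}

/* Resize path: after rcu_assign_pointer() of the new table, replay the log. */
static void demo_replay(struct list_head *backlog,
			void (*add)(u32 key), void (*del)(u32 key))
{
	struct demo_pending *p, *tmp;

	list_for_each_entry_safe(p, tmp, backlog, list) {
		if (p->is_add)
			add(p->key);
		else
			del(p->key);
		list_del(&p->list);
		kfree(p);
	}
}
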
@@ -677,15 +810,44 @@ out:
return ret;
cleanup:
+ rcu_read_unlock_bh();
atomic_set(&orig->ref, 0);
atomic_dec(&orig->uref);
- spin_unlock_bh(&set->lock);
mtype_ahash_destroy(set, t, false);
if (ret == -EAGAIN)
goto retry;
goto out;
}
+/* Get the current number of elements and ext_size in the set */
+static void
+mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
+{
+ struct htype *h = set->data;
+ const struct htable *t;
+ u32 i, j, r;
+ struct hbucket *n;
+ struct mtype_elem *data;
+
+ t = rcu_dereference_bh(h->table);
+ for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
+ for (i = ahash_bucket_start(r, t->htable_bits);
+ i < ahash_bucket_end(r, t->htable_bits); i++) {
+ n = rcu_dereference_bh(hbucket(t, i));
+ if (!n)
+ continue;
+ for (j = 0; j < n->pos; j++) {
+ if (!test_bit(j, n->used))
+ continue;
+ data = ahash_data(n, j, set->dsize);
+ if (!SET_ELEM_EXPIRED(set, data))
+ (*elements)++;
+ }
+ }
+ *ext_size += t->hregion[r].ext_size;
+ }
+}
+
/* Add an element to a hash and update the internal counters when succeeded,
* otherwise report the proper error code.
*/
@@ -698,32 +860,49 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n, *old = ERR_PTR(-ENOENT);
- int i, j = -1;
+ int i, j = -1, ret;
bool flag_exist = flags & IPSET_FLAG_EXIST;
bool deleted = false, forceadd = false, reuse = false;
- u32 key, multi = 0;
+ u32 r, key, multi = 0, elements, maxelem;
- if (set->elements >= h->maxelem) {
- if (SET_WITH_TIMEOUT(set))
- /* FIXME: when set is full, we slow down here */
- mtype_expire(set, h);
- if (set->elements >= h->maxelem && SET_WITH_FORCEADD(set))
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(value, h->initval, t->htable_bits);
+ r = ahash_region(key, t->htable_bits);
+ atomic_inc(&t->uref);
+ elements = t->hregion[r].elements;
+ maxelem = t->maxelem;
+ if (elements >= maxelem) {
+ u32 e;
+ if (SET_WITH_TIMEOUT(set)) {
+ rcu_read_unlock_bh();
+ mtype_gc_do(set, h, t, r);
+ rcu_read_lock_bh();
+ }
+ maxelem = h->maxelem;
+ elements = 0;
+ for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
+ elements += t->hregion[e].elements;
+ if (elements >= maxelem && SET_WITH_FORCEADD(set))
forceadd = true;
}
+ rcu_read_unlock_bh();
- t = ipset_dereference_protected(h->table, set);
- key = HKEY(value, h->initval, t->htable_bits);
- n = __ipset_dereference_protected(hbucket(t, key), 1);
+ spin_lock_bh(&t->hregion[r].lock);
+ n = rcu_dereference_bh(hbucket(t, key));
if (!n) {
- if (forceadd || set->elements >= h->maxelem)
+ if (forceadd || elements >= maxelem)
goto set_full;
old = NULL;
n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
GFP_ATOMIC);
- if (!n)
- return -ENOMEM;
+ if (!n) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
n->size = AHASH_INIT_SIZE;
- set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
+ t->hregion[r].ext_size +=
+ ext_size(AHASH_INIT_SIZE, set->dsize);
goto copy_elem;
}
for (i = 0; i < n->pos; i++) {
@@ -737,38 +916,37 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
}
data = ahash_data(n, i, set->dsize);
if (mtype_data_equal(data, d, &multi)) {
- if (flag_exist ||
- (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, set)))) {
+ if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
/* Just the extensions could be overwritten */
j = i;
goto overwrite_extensions;
}
- return -IPSET_ERR_EXIST;
+ ret = -IPSET_ERR_EXIST;
+ goto unlock;
}
/* Reuse first timed out entry */
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, set)) &&
- j == -1) {
+ if (SET_ELEM_EXPIRED(set, data) && j == -1) {
j = i;
reuse = true;
}
}
if (reuse || forceadd) {
+ if (j == -1)
+ j = 0;
data = ahash_data(n, j, set->dsize);
if (!deleted) {
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
- mtype_del_cidr(h,
+ mtype_del_cidr(set, h,
NCIDR_PUT(DCIDR_GET(data->cidr, i)),
i);
#endif
ip_set_ext_destroy(set, data);
- set->elements--;
+ t->hregion[r].elements--;
}
goto copy_data;
}
- if (set->elements >= h->maxelem)
+ if (elements >= maxelem)
goto set_full;
/* Create a new slot */
if (n->pos >= n->size) {
@@ -776,28 +954,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (n->size >= AHASH_MAX(h)) {
/* Trigger rehashing */
mtype_data_next(&h->next, d);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto resize;
}
old = n;
n = kzalloc(sizeof(*n) +
(old->size + AHASH_INIT_SIZE) * set->dsize,
GFP_ATOMIC);
- if (!n)
- return -ENOMEM;
+ if (!n) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
memcpy(n, old, sizeof(struct hbucket) +
old->size * set->dsize);
n->size = old->size + AHASH_INIT_SIZE;
- set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
+ t->hregion[r].ext_size +=
+ ext_size(AHASH_INIT_SIZE, set->dsize);
}
copy_elem:
j = n->pos++;
data = ahash_data(n, j, set->dsize);
copy_data:
- set->elements++;
+ t->hregion[r].elements++;
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
- mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
+ mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
#endif
memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
@@ -820,13 +1002,41 @@ overwrite_extensions:
if (old)
kfree_rcu(old, rcu);
}
+ ret = 0;
+resize:
+ spin_unlock_bh(&t->hregion[r].lock);
+ if (atomic_read(&t->ref) && ext->target) {
+ /* Resize is in process and kernel side add, save values */
+ struct mtype_resize_ad *x;
+
+ x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
+ if (!x)
+ /* Don't bother */
+ goto out;
+ x->ad = IPSET_ADD;
+ memcpy(&x->d, value, sizeof(struct mtype_elem));
+ memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
+ memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
+ x->flags = flags;
+ spin_lock_bh(&set->lock);
+ list_add_tail(&x->list, &h->ad);
+ spin_unlock_bh(&set->lock);
+ }
+ goto out;
- return 0;
set_full:
if (net_ratelimit())
pr_warn("Set %s is full, maxelem %u reached\n",
- set->name, h->maxelem);
- return -IPSET_ERR_HASH_FULL;
+ set->name, maxelem);
+ ret = -IPSET_ERR_HASH_FULL;
+unlock:
+ spin_unlock_bh(&t->hregion[r].lock);
+out:
+ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+ pr_debug("Table destroy after resize by add: %p\n", t);
+ mtype_ahash_destroy(set, t, false);
+ }
+ return ret;
}
/* Delete an element from the hash and free up space if possible.

@@ -840,13 +1050,23 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n;
- int i, j, k, ret = -IPSET_ERR_EXIST;
+ struct mtype_resize_ad *x = NULL;
+ int i, j, k, r, ret = -IPSET_ERR_EXIST;
u32 key, multi = 0;
size_t dsize = set->dsize;
- t = ipset_dereference_protected(h->table, set);
+ /* Userspace add and resize are excluded by the mutex.
+ * Kernelspace add does not trigger resize.
+ */
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
- n = __ipset_dereference_protected(hbucket(t, key), 1);
+ r = ahash_region(key, t->htable_bits);
+ atomic_inc(&t->uref);
+ rcu_read_unlock_bh();
+
+ spin_lock_bh(&t->hregion[r].lock);
+ n = rcu_dereference_bh(hbucket(t, key));
if (!n)
goto out;
for (i = 0, k = 0; i < n->pos; i++) {
@@ -857,8 +1077,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
data = ahash_data(n, i, dsize);
if (!mtype_data_equal(data, d, &multi))
continue;
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, set)))
+ if (SET_ELEM_EXPIRED(set, data))
goto out;
ret = 0;
@@ -866,20 +1085,33 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
smp_mb__after_atomic();
if (i + 1 == n->pos)
n->pos--;
- set->elements--;
+ t->hregion[r].elements--;
#ifdef IP_SET_HASH_WITH_NETS
for (j = 0; j < IPSET_NET_COUNT; j++)
- mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)),
- j);
+ mtype_del_cidr(set, h,
+ NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
#endif
ip_set_ext_destroy(set, data);
+ if (atomic_read(&t->ref) && ext->target) {
+ /* Resize is in process and kernel side del,
+ * save values
+ */
+ x = kzalloc(sizeof(struct mtype_resize_ad),
+ GFP_ATOMIC);
+ if (x) {
+ x->ad = IPSET_DEL;
+ memcpy(&x->d, value,
+ sizeof(struct mtype_elem));
+ x->flags = flags;
+ }
+ }
for (; i < n->pos; i++) {
if (!test_bit(i, n->used))
k++;
}
if (n->pos == 0 && k == 0) {
- set->ext_size -= ext_size(n->size, dsize);
+ t->hregion[r].ext_size -= ext_size(n->size, dsize);
rcu_assign_pointer(hbucket(t, key), NULL);
kfree_rcu(n, rcu);
} else if (k >= AHASH_INIT_SIZE) {
@@ -898,7 +1130,8 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
k++;
}
tmp->pos = k;
- set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
+ t->hregion[r].ext_size -=
+ ext_size(AHASH_INIT_SIZE, dsize);
rcu_assign_pointer(hbucket(t, key), tmp);
kfree_rcu(n, rcu);
}
@@ -906,6 +1139,16 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
}
out:
+ spin_unlock_bh(&t->hregion[r].lock);
+ if (x) {
+ spin_lock_bh(&set->lock);
+ list_add(&x->list, &h->ad);
+ spin_unlock_bh(&set->lock);
+ }
+ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+ pr_debug("Table destroy after resize by del: %p\n", t);
+ mtype_ahash_destroy(set, t, false);
+ }
return ret;
}
@@ -991,6 +1234,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
int i, ret = 0;
u32 key, multi = 0;
+ rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
/* If we test an IP address and not a network address,
@@ -1022,6 +1266,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
goto out;
}
out:
+ rcu_read_unlock_bh();
return ret;
}
@@ -1033,23 +1278,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
const struct htable *t;
struct nlattr *nested;
size_t memsize;
+ u32 elements = 0;
+ size_t ext_size = 0;
u8 htable_bits;
- /* If any members have expired, set->elements will be wrong
- * mytype_expire function will update it with the right count.
- * we do not hold set->lock here, so grab it first.
- * set->elements can still be incorrect in the case of a huge set,
- * because elements might time out during the listing.
- */
- if (SET_WITH_TIMEOUT(set)) {
- spin_lock_bh(&set->lock);
- mtype_expire(set, h);
- spin_unlock_bh(&set->lock);
- }
-
rcu_read_lock_bh();
- t = rcu_dereference_bh_nfnl(h->table);
- memsize = mtype_ahash_memsize(h, t) + set->ext_size;
+ t = rcu_dereference_bh(h->table);
+ mtype_ext_size(set, &elements, &ext_size);
+ memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
htable_bits = t->htable_bits;
rcu_read_unlock_bh();
@@ -1071,7 +1307,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
#endif
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
- nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
+ nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;
@@ -1091,15 +1327,15 @@ mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
if (start) {
rcu_read_lock_bh();
- t = rcu_dereference_bh_nfnl(h->table);
+ t = ipset_dereference_bh_nfnl(h->table);
atomic_inc(&t->uref);
cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
rcu_read_unlock_bh();
} else if (cb->args[IPSET_CB_PRIVATE]) {
t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
- /* Resizing didn't destroy the hash table */
- pr_debug("Table destroy by dump: %p\n", t);
+ pr_debug("Table destroy after resize "
+ " by dump: %p\n", t);
mtype_ahash_destroy(set, t, false);
}
cb->args[IPSET_CB_PRIVATE] = 0;
@@ -1141,8 +1377,7 @@ mtype_list(const struct ip_set *set,
if (!test_bit(i, n->used))
continue;
e = ahash_data(n, i, set->dsize);
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, set)))
+ if (SET_ELEM_EXPIRED(set, e))
continue;
pr_debug("list hash %lu hbucket %p i %u, data %p\n",
cb->args[IPSET_CB_ARG0], n, i, e);
@@ -1208,6 +1443,7 @@ static const struct ip_set_type_variant mtype_variant = {
.uref = mtype_uref,
.resize = mtype_resize,
.same_set = mtype_same_set,
+ .region_lock = true,
};
#ifdef IP_SET_EMIT_CREATE
@@ -1226,6 +1462,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
size_t hsize;
struct htype *h;
struct htable *t;
+ u32 i;
pr_debug("Create set %s with family %s\n",
set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
@@ -1294,6 +1531,15 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
kfree(h);
return -ENOMEM;
}
+ t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
+ if (!t->hregion) {
+ kfree(t);
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->gc.set = set;
+ for (i = 0; i < ahash_numof_locks(hbits); i++)
+ spin_lock_init(&t->hregion[i].lock);
h->maxelem = maxelem;
#ifdef IP_SET_HASH_WITH_NETMASK
h->netmask = netmask;
@@ -1304,9 +1550,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
get_random_bytes(&h->initval, sizeof(h->initval));
t->htable_bits = hbits;
+ t->maxelem = h->maxelem / ahash_numof_locks(hbits);
RCU_INIT_POINTER(h->table, t);
- h->set = set;
+ INIT_LIST_HEAD(&h->ad);
set->data = h;
#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4) {
@@ -1329,12 +1576,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4)
#endif
- IPSET_TOKEN(HTYPE, 4_gc_init)(set,
- IPSET_TOKEN(HTYPE, 4_gc));
+ IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
#ifndef IP_SET_PROTO_UNDEF
else
- IPSET_TOKEN(HTYPE, 6_gc_init)(set,
- IPSET_TOKEN(HTYPE, 6_gc));
+ IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
#endif
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
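
A convention that recurs through the gc, add, del and dump paths above: every user takes a uref on the table before touching it, resize sets ref on the table it replaces, and whichever user drops the last uref destroys a table that resize has already detached. Condensed, hedged sketch with illustrative demo_* names:

#include <linux/atomic.h>

struct demo_table {
	atomic_t ref;	/* set by resize when this table has been replaced */
	atomic_t uref;	/* dump/gc/add/del currently using the table */
	/* ... buckets ... */
};

static void demo_table_use_begin(struct demo_table *t)
{
	atomic_inc(&t->uref);
}

static void demo_table_use_end(struct demo_table *t,
			       void (*destroy)(struct demo_table *t))
{
	/* Only the last user of an already-replaced table frees it. */
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref))
		destroy(t);
}

This is why mtype_gc_do(), mtype_add(), mtype_del() and the dump path above all end with the same atomic_dec_and_test()/atomic_read() pair.
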
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8dc892a9dc91..605e0f68f8bd 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1239,7 +1239,7 @@ static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer,
p = msg_end;
if (p + sizeof(s->v4) > buffer+buflen) {
- IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
return;
}
s = (union ip_vs_sync_conn *)p;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f475fec84536..1927fc296f95 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -894,32 +894,175 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
}
}
-/* Resolve race on insertion if this protocol allows this. */
+static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
+{
+ struct nf_conn_tstamp *tstamp;
+
+ atomic_inc(&ct->ct_general.use);
+ ct->status |= IPS_CONFIRMED;
+
+ /* set conntrack timestamp, if enabled. */
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp)
+ tstamp->start = ktime_get_real_ns();
+}
+
+static int __nf_ct_resolve_clash(struct sk_buff *skb,
+ struct nf_conntrack_tuple_hash *h)
+{
+ /* This is the conntrack entry already in hashes that won race. */
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *loser_ct;
+
+ loser_ct = nf_ct_get(skb, &ctinfo);
+
+ if (nf_ct_is_dying(ct))
+ return NF_DROP;
+
+ if (!atomic_inc_not_zero(&ct->ct_general.use))
+ return NF_DROP;
+
+ if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
+ nf_ct_match(ct, loser_ct)) {
+ struct net *net = nf_ct_net(ct);
+
+ nf_ct_acct_merge(ct, ctinfo, loser_ct);
+ nf_ct_add_to_dying_list(loser_ct);
+ nf_conntrack_put(&loser_ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
+
+ NF_CT_STAT_INC(net, insert_failed);
+ return NF_ACCEPT;
+ }
+
+ nf_ct_put(ct);
+ return NF_DROP;
+}
+
+/**
+ * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
+ *
+ * @skb: skb that causes the collision
+ * @repl_idx: hash slot for reply direction
+ *
+ * Called when origin or reply direction had a clash.
+ * The skb can be handled without packet drop provided the reply direction
+ * is unique or there the existing entry has the identical tuple in both
+ * is unique or the existing entry has the identical tuple in both
+ *
+ * Caller must hold conntrack table locks to prevent concurrent updates.
+ *
+ * Returns NF_DROP if the clash could not be handled.
+ */
+static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+{
+ struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
+ const struct nf_conntrack_zone *zone;
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ struct net *net;
+
+ zone = nf_ct_zone(loser_ct);
+ net = nf_ct_net(loser_ct);
+
+ /* Reply direction must never result in a clash, unless both origin
+ * and reply tuples are identical.
+ */
+ hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
+ if (nf_ct_key_equal(h,
+ &loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ zone, net))
+ return __nf_ct_resolve_clash(skb, h);
+ }
+
+ /* We want the clashing entry to go away real soon: 1 second timeout. */
+ loser_ct->timeout = nfct_time_stamp + HZ;
+
+ /* IPS_NAT_CLASH removes the entry automatically on the first
+ * reply. Also prevents UDP tracker from moving the entry to
+ * ASSURED state, i.e. the entry can always be evicted under
+ * pressure.
+ */
+ loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
+
+ __nf_conntrack_insert_prepare(loser_ct);
+
+ /* fake add for ORIGINAL dir: we want lookups to only find the entry
+ * already in the table. This also hides the clashing entry from
+ * ctnetlink iteration, i.e. conntrack -L won't show them.
+ */
+ hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+
+ hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+ &nf_conntrack_hash[repl_idx]);
+ return NF_ACCEPT;
+}
+
+/**
+ * nf_ct_resolve_clash - attempt to handle clash without packet drop
+ *
+ * @skb: skb that causes the clash
+ * @h: tuplehash of the clashing entry already in table
+ * @hash_reply: hash slot for reply direction
+ *
+ * A conntrack entry can be inserted to the connection tracking table
+ * if there is no existing entry with an identical tuple.
+ *
+ * If there is one, @skb (and the associated, unconfirmed conntrack) has
+ * to be dropped. In case @skb is retransmitted, next conntrack lookup
+ * will find the already-existing entry.
+ *
+ * The major problem with such packet drop is the extra delay added by
+ * the packet loss -- it will take some time for a retransmit to occur
+ * (or the sender to time out when waiting for a reply).
+ *
+ * This function attempts to handle the situation without packet drop.
+ *
+ * If @skb has no NAT transformation or if the colliding entries are
+ * exactly the same, only the to-be-confirmed conntrack entry is discarded
+ * and @skb is associated with the conntrack entry already in the table.
+ *
+ * Failing that, the new, unconfirmed conntrack is still added to the table
+ * provided that the collision only occurs in the ORIGINAL direction.
+ * The new entry will be added after the existing one in the hash list,
+ * so packets in the ORIGINAL direction will continue to match the existing
+ * entry. The new entry will also have a fixed timeout so it expires --
+ * due to the collision, it will not see bidirectional traffic.
+ *
+ * Returns NF_DROP if the clash could not be resolved.
+ */
static __cold noinline int
-nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- struct nf_conntrack_tuple_hash *h)
+nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
+ u32 reply_hash)
{
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
const struct nf_conntrack_l4proto *l4proto;
- enum ip_conntrack_info oldinfo;
- struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *loser_ct;
+ struct net *net;
+ int ret;
+
+ loser_ct = nf_ct_get(skb, &ctinfo);
+ net = nf_ct_net(loser_ct);
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
- if (l4proto->allow_clash &&
- !nf_ct_is_dying(ct) &&
- atomic_inc_not_zero(&ct->ct_general.use)) {
- if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
- nf_ct_match(ct, loser_ct)) {
- nf_ct_acct_merge(ct, ctinfo, loser_ct);
- nf_conntrack_put(&loser_ct->ct_general);
- nf_ct_set(skb, ct, oldinfo);
- return NF_ACCEPT;
- }
- nf_ct_put(ct);
- }
+ if (!l4proto->allow_clash)
+ goto drop;
+
+ ret = __nf_ct_resolve_clash(skb, h);
+ if (ret == NF_ACCEPT)
+ return ret;
+
+ ret = nf_ct_resolve_clash_harder(skb, reply_hash);
+ if (ret == NF_ACCEPT)
+ return ret;
+
+drop:
+ nf_ct_add_to_dying_list(loser_ct);
NF_CT_STAT_INC(net, drop);
+ NF_CT_STAT_INC(net, insert_failed);
return NF_DROP;
}
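
A side note on the refcount handling in __nf_ct_resolve_clash() above: the entry that won the race is adopted only if atomic_inc_not_zero() still succeeds, so an entry whose last reference is already gone is never revived by the losing packet. The same idiom in isolation, with illustrative names:

#include <linux/atomic.h>

struct demo_entry {
	atomic_t use;
	/* ... payload ... */
};

static struct demo_entry *demo_entry_get_live(struct demo_entry *e)
{
	/* Fails once a concurrent release has dropped the last reference;
	 * the caller must then treat the entry as gone and fall back
	 * (here: to dropping the packet).
	 */
	if (!atomic_inc_not_zero(&e->use))
		return NULL;
	return e;
}
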
@@ -932,7 +1075,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
- struct nf_conn_tstamp *tstamp;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
@@ -989,6 +1131,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (unlikely(nf_ct_is_dying(ct))) {
nf_ct_add_to_dying_list(ct);
+ NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
@@ -1009,13 +1152,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
setting time, otherwise we'd get timer wrap in
weird delay cases. */
ct->timeout += nfct_time_stamp;
- atomic_inc(&ct->ct_general.use);
- ct->status |= IPS_CONFIRMED;
- /* set conntrack timestamp, if enabled. */
- tstamp = nf_conn_tstamp_find(ct);
- if (tstamp)
- tstamp->start = ktime_get_real_ns();
+ __nf_conntrack_insert_prepare(ct);
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
@@ -1035,11 +1173,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_ACCEPT;
out:
- nf_ct_add_to_dying_list(ct);
- ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
+ ret = nf_ct_resolve_clash(skb, h, reply_hash);
dying:
nf_conntrack_double_unlock(hash, reply_hash);
- NF_CT_STAT_INC(net, insert_failed);
local_bh_enable();
return ret;
}
@@ -2248,8 +2384,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
- hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
- GFP_KERNEL | __GFP_ZERO);
+ hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
@@ -2334,7 +2469,6 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
return nf_conntrack_hash_resize(hashsize);
}
-EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
static __always_inline unsigned int total_extension_size(void)
{
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index c24e5b64b00c..3dbe2329c3f1 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -37,7 +37,6 @@ void nf_ct_ext_destroy(struct nf_conn *ct)
kfree(ct->ext);
}
-EXPORT_SYMBOL(nf_ct_ext_destroy);
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 0399ae8f1188..4f897b14b606 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -114,7 +114,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
-/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
@@ -130,7 +130,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
/* REPLY */
/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
-/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
@@ -316,7 +316,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
}
- ct->proto.sctp.state = new_state;
+ ct->proto.sctp.state = SCTP_CONNTRACK_NONE;
}
return true;
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7365b43f8f98..760ca2422816 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -81,6 +81,18 @@ static bool udp_error(struct sk_buff *skb,
return false;
}
+static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ u32 extra_jiffies)
+{
+ if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY &&
+ ct->status & IPS_NAT_CLASH))
+ nf_ct_kill(ct);
+ else
+ nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies);
+}
+
/* Returns verdict for packet, and may modify conntracktype */
int nf_conntrack_udp_packet(struct nf_conn *ct,
struct sk_buff *skb,
@@ -116,8 +128,8 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_ct_refresh_acct(ct, ctinfo, skb,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
+ timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
@@ -198,8 +210,8 @@ int nf_conntrack_udplite_packet(struct nf_conn *ct,
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_ct_refresh_acct(ct, ctinfo, skb,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
+ timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 410809c669e1..4912069627b6 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -411,7 +411,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
-
+ (*pos)++;
return NULL;
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index e33a73cb1f42..8af28e10b4e6 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -61,9 +61,9 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
if (ct->status & IPS_SRC_NAT)
- flow->flags |= FLOW_OFFLOAD_SNAT;
+ __set_bit(NF_FLOW_SNAT, &flow->flags);
if (ct->status & IPS_DST_NAT)
- flow->flags |= FLOW_OFFLOAD_DNAT;
+ __set_bit(NF_FLOW_DNAT, &flow->flags);
return flow;
@@ -182,8 +182,6 @@ void flow_offload_free(struct flow_offload *flow)
default:
break;
}
- if (flow->flags & FLOW_OFFLOAD_DYING)
- nf_ct_delete(flow->ct, 0, 0);
nf_ct_put(flow->ct);
kfree_rcu(flow, rcu_head);
}
@@ -245,8 +243,10 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
return err;
}
- if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD)
+ if (nf_flowtable_hw_offload(flow_table)) {
+ __set_bit(NF_FLOW_HW, &flow->flags);
nf_flow_offload_add(flow_table, flow);
+ }
return 0;
}
@@ -271,7 +271,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
if (nf_flow_has_expired(flow))
flow_offload_fixup_ct(flow->ct);
- else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+ else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
flow_offload_fixup_ct_timeout(flow->ct);
flow_offload_free(flow);
@@ -279,7 +279,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
void flow_offload_teardown(struct flow_offload *flow)
{
- flow->flags |= FLOW_OFFLOAD_TEARDOWN;
+ set_bit(NF_FLOW_TEARDOWN, &flow->flags);
flow_offload_fixup_ct_state(flow->ct);
}
@@ -300,7 +300,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
- if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
+ if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
return NULL;
if (unlikely(nf_ct_is_dying(flow->ct)))
@@ -348,19 +348,18 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
- if (flow->flags & FLOW_OFFLOAD_HW)
- nf_flow_offload_stats(flow_table, flow);
-
if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
- (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
- if (flow->flags & FLOW_OFFLOAD_HW) {
- if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
+ test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+ if (test_bit(NF_FLOW_HW, &flow->flags)) {
+ if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
nf_flow_offload_del(flow_table, flow);
- else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
+ else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
flow_offload_del(flow_table, flow);
} else {
flow_offload_del(flow_table, flow);
}
+ } else if (test_bit(NF_FLOW_HW, &flow->flags)) {
+ nf_flow_offload_stats(flow_table, flow);
}
}
@@ -524,15 +523,15 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
(flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
flow->tuplehash[1].tuple.iifidx == dev->ifindex))
- flow_offload_dead(flow);
+ flow_offload_teardown(flow);
}
static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev)
{
- nf_flow_table_offload_flush(flowtable);
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
flush_delayed_work(&flowtable->gc_work);
+ nf_flow_table_offload_flush(flowtable);
}
void nf_flow_table_cleanup(struct net_device *dev)
@@ -554,6 +553,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
cancel_delayed_work_sync(&flow_table->gc_work);
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
+ nf_flow_table_offload_flush(flow_table);
rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 7ea2ddc2aa93..9e563fd3da0f 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -144,11 +144,11 @@ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
{
struct iphdr *iph = ip_hdr(skb);
- if (flow->flags & FLOW_OFFLOAD_SNAT &&
+ if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
(nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
return -1;
- if (flow->flags & FLOW_OFFLOAD_DNAT &&
+ if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
(nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
return -1;
@@ -232,6 +232,13 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
return NF_STOLEN;
}
+static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+{
+ return nf_flowtable_hw_offload(flow_table) &&
+ test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags);
+}
+
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -272,6 +279,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
return NF_ACCEPT;
+ if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
+ nf_flow_offload_add(flow_table, flow);
+
if (nf_flow_offload_dst_check(&rt->dst)) {
flow_offload_teardown(flow);
return NF_ACCEPT;
@@ -414,11 +424,11 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow,
struct ipv6hdr *ip6h = ipv6_hdr(skb);
unsigned int thoff = sizeof(*ip6h);
- if (flow->flags & FLOW_OFFLOAD_SNAT &&
+ if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
(nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
return -1;
- if (flow->flags & FLOW_OFFLOAD_DNAT &&
+ if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
(nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
return -1;
@@ -498,6 +508,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
sizeof(*ip6h)))
return NF_ACCEPT;
+ if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
+ nf_flow_offload_add(flow_table, flow);
+
if (nf_flow_offload_dst_check(&rt->dst)) {
flow_offload_teardown(flow);
return NF_ACCEPT;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index d06969af1085..06f00cdc3891 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -24,6 +24,7 @@ struct flow_offload_work {
};
struct nf_flow_key {
+ struct flow_dissector_key_meta meta;
struct flow_dissector_key_control control;
struct flow_dissector_key_basic basic;
union {
@@ -55,6 +56,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
struct nf_flow_key *mask = &match->mask;
struct nf_flow_key *key = &match->key;
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
@@ -62,6 +64,9 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
+ key->meta.ingress_ifindex = tuple->iifidx;
+ mask->meta.ingress_ifindex = 0xffffffff;
+
switch (tuple->l3proto) {
case AF_INET:
key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
@@ -105,7 +110,8 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
key->tp.dst = tuple->dst_port;
mask->tp.dst = 0xffff;
- match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_PORTS);
return 0;
@@ -444,16 +450,16 @@ int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;
- if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
flow_offload_ipv4_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir, flow_rule);
}
- if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
flow_offload_port_dnat(net, flow, dir, flow_rule);
}
- if (flow->flags & FLOW_OFFLOAD_SNAT ||
- flow->flags & FLOW_OFFLOAD_DNAT)
+ if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
+ test_bit(NF_FLOW_DNAT, &flow->flags))
flow_offload_ipv4_checksum(net, flow, flow_rule);
flow_offload_redirect(flow, dir, flow_rule);
@@ -470,11 +476,11 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;
- if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
flow_offload_ipv6_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir, flow_rule);
}
- if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
flow_offload_port_dnat(net, flow, dir, flow_rule);
}
@@ -586,23 +592,25 @@ static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
cls_flow->cookie = (unsigned long)tuple;
}
-static int flow_offload_tuple_add(struct flow_offload_work *offload,
- struct nf_flow_rule *flow_rule,
- enum flow_offload_tuple_dir dir)
+static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
+ struct flow_offload *flow,
+ struct nf_flow_rule *flow_rule,
+ enum flow_offload_tuple_dir dir,
+ int priority, int cmd,
+ struct list_head *block_cb_list)
{
- struct nf_flowtable *flowtable = offload->flowtable;
struct flow_cls_offload cls_flow = {};
struct flow_block_cb *block_cb;
struct netlink_ext_ack extack;
__be16 proto = ETH_P_ALL;
int err, i = 0;
- nf_flow_offload_init(&cls_flow, proto, offload->priority,
- FLOW_CLS_REPLACE,
- &offload->flow->tuplehash[dir].tuple, &extack);
- cls_flow.rule = flow_rule->rule;
+ nf_flow_offload_init(&cls_flow, proto, priority, cmd,
+ &flow->tuplehash[dir].tuple, &extack);
+ if (cmd == FLOW_CLS_REPLACE)
+ cls_flow.rule = flow_rule->rule;
- list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
+ list_for_each_entry(block_cb, block_cb_list, list) {
err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
block_cb->cb_priv);
if (err < 0)
@@ -614,23 +622,22 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
return i;
}
+static int flow_offload_tuple_add(struct flow_offload_work *offload,
+ struct nf_flow_rule *flow_rule,
+ enum flow_offload_tuple_dir dir)
+{
+ return nf_flow_offload_tuple(offload->flowtable, offload->flow,
+ flow_rule, dir, offload->priority,
+ FLOW_CLS_REPLACE,
+ &offload->flowtable->flow_block.cb_list);
+}
+
static void flow_offload_tuple_del(struct flow_offload_work *offload,
enum flow_offload_tuple_dir dir)
{
- struct nf_flowtable *flowtable = offload->flowtable;
- struct flow_cls_offload cls_flow = {};
- struct flow_block_cb *block_cb;
- struct netlink_ext_ack extack;
- __be16 proto = ETH_P_ALL;
-
- nf_flow_offload_init(&cls_flow, proto, offload->priority,
- FLOW_CLS_DESTROY,
- &offload->flow->tuplehash[dir].tuple, &extack);
-
- list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
- block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
-
- offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
+ nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
+ offload->priority, FLOW_CLS_DESTROY,
+ &offload->flowtable->flow_block.cb_list);
}
static int flow_offload_rule_add(struct flow_offload_work *offload,
@@ -648,26 +655,27 @@ static int flow_offload_rule_add(struct flow_offload_work *offload,
return 0;
}
-static int flow_offload_work_add(struct flow_offload_work *offload)
+static void flow_offload_work_add(struct flow_offload_work *offload)
{
struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
int err;
err = nf_flow_offload_alloc(offload, flow_rule);
if (err < 0)
- return -ENOMEM;
+ return;
err = flow_offload_rule_add(offload, flow_rule);
+ if (err < 0)
+ set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);
nf_flow_offload_destroy(flow_rule);
-
- return err;
}
static void flow_offload_work_del(struct flow_offload_work *offload)
{
flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+ set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}
static void flow_offload_tuple_stats(struct flow_offload_work *offload,
@@ -706,7 +714,6 @@ static void flow_offload_work_handler(struct work_struct *work)
{
struct flow_offload_work *offload, *next;
LIST_HEAD(offload_pending_list);
- int ret;
spin_lock_bh(&flow_offload_pending_list_lock);
list_replace_init(&flow_offload_pending_list, &offload_pending_list);
@@ -715,9 +722,7 @@ static void flow_offload_work_handler(struct work_struct *work)
list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
switch (offload->cmd) {
case FLOW_CLS_REPLACE:
- ret = flow_offload_work_add(offload);
- if (ret < 0)
- offload->flow->flags &= ~FLOW_OFFLOAD_HW;
+ flow_offload_work_add(offload);
break;
case FLOW_CLS_DESTROY:
flow_offload_work_del(offload);
@@ -742,20 +747,33 @@ static void flow_offload_queue_work(struct flow_offload_work *offload)
schedule_work(&nf_flow_offload_work);
}
-void nf_flow_offload_add(struct nf_flowtable *flowtable,
- struct flow_offload *flow)
+static struct flow_offload_work *
+nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
+ struct flow_offload *flow, unsigned int cmd)
{
struct flow_offload_work *offload;
offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
if (!offload)
- return;
+ return NULL;
- offload->cmd = FLOW_CLS_REPLACE;
+ offload->cmd = cmd;
offload->flow = flow;
offload->priority = flowtable->priority;
offload->flowtable = flowtable;
- flow->flags |= FLOW_OFFLOAD_HW;
+
+ return offload;
+}
+
+
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+ struct flow_offload *flow)
+{
+ struct flow_offload_work *offload;
+
+ offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
+ if (!offload)
+ return;
flow_offload_queue_work(offload);
}
@@ -765,15 +783,11 @@ void nf_flow_offload_del(struct nf_flowtable *flowtable,
{
struct flow_offload_work *offload;
- offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
if (!offload)
return;
- offload->cmd = FLOW_CLS_DESTROY;
- offload->flow = flow;
- offload->flow->flags |= FLOW_OFFLOAD_HW_DYING;
- offload->flowtable = flowtable;
-
+ set_bit(NF_FLOW_HW_DYING, &flow->flags);
flow_offload_queue_work(offload);
}
@@ -784,24 +798,19 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
__s32 delta;
delta = nf_flow_timeout_delta(flow->timeout);
- if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
- flow->flags & FLOW_OFFLOAD_HW_DYING)
+ if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
return;
- offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
if (!offload)
return;
- offload->cmd = FLOW_CLS_STATS;
- offload->flow = flow;
- offload->flowtable = flowtable;
-
flow_offload_queue_work(offload);
}
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
- if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
+ if (nf_flowtable_hw_offload(flowtable))
flush_work(&nf_flow_offload_work);
}
@@ -830,28 +839,44 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
return err;
}
+static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
+ struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!dev->netdev_ops->ndo_setup_tc)
+ return -EOPNOTSUPP;
+
+ memset(bo, 0, sizeof(*bo));
+ bo->net = dev_net(dev);
+ bo->block = &flowtable->flow_block;
+ bo->command = cmd;
+ bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+ bo->extack = extack;
+ INIT_LIST_HEAD(&bo->cb_list);
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
- struct flow_block_offload bo = {};
+ struct flow_block_offload bo;
int err;
- if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
+ if (!nf_flowtable_hw_offload(flowtable))
return 0;
- if (!dev->netdev_ops->ndo_setup_tc)
- return -EOPNOTSUPP;
-
- bo.net = dev_net(dev);
- bo.block = &flowtable->flow_block;
- bo.command = cmd;
- bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
- bo.extack = &extack;
- INIT_LIST_HEAD(&bo.cb_list);
-
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo);
+ err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack);
if (err < 0)
return err;
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b0930d4aba22..b9cbe1e2453e 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu + 1;
return per_cpu_ptr(snet->stats, cpu);
}
-
+ (*pos)++;
return NULL;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 65f51a2e9c2a..38c680f28f15 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -553,47 +553,70 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
static const struct nft_chain_type *
+__nft_chain_type_get(u8 family, enum nft_chain_types type)
+{
+ if (family >= NFPROTO_NUMPROTO ||
+ type >= NFT_CHAIN_T_MAX)
+ return NULL;
+
+ return chain_type[family][type];
+}
+
+static const struct nft_chain_type *
__nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
{
+ const struct nft_chain_type *type;
int i;
for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
- if (chain_type[family][i] != NULL &&
- !nla_strcmp(nla, chain_type[family][i]->name))
- return chain_type[family][i];
+ type = __nft_chain_type_get(family, i);
+ if (!type)
+ continue;
+ if (!nla_strcmp(nla, type->name))
+ return type;
}
return NULL;
}
-/*
- * Loading a module requires dropping mutex that guards the transaction.
- * A different client might race to start a new transaction meanwhile. Zap the
- * list of pending transaction and then restore it once the mutex is grabbed
- * again. Users of this function return EAGAIN which implicitly triggers the
- * transaction abort path to clean up the list of pending transactions.
- */
+struct nft_module_request {
+ struct list_head list;
+ char module[MODULE_NAME_LEN];
+ bool done;
+};
+
#ifdef CONFIG_MODULES
-static void nft_request_module(struct net *net, const char *fmt, ...)
+static int nft_request_module(struct net *net, const char *fmt, ...)
{
char module_name[MODULE_NAME_LEN];
- LIST_HEAD(commit_list);
+ struct nft_module_request *req;
va_list args;
int ret;
- list_splice_init(&net->nft.commit_list, &commit_list);
-
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
if (ret >= MODULE_NAME_LEN)
- return;
+ return 0;
- mutex_unlock(&net->nft.commit_mutex);
- request_module("%s", module_name);
- mutex_lock(&net->nft.commit_mutex);
+ list_for_each_entry(req, &net->nft.module_list, list) {
+ if (!strcmp(req->module, module_name)) {
+ if (req->done)
+ return 0;
- WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
- list_splice(&commit_list, &net->nft.commit_list);
+ /* A request to load this module already exists. */
+ return -EAGAIN;
+ }
+ }
+
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->done = false;
+ strlcpy(req->module, module_name, MODULE_NAME_LEN);
+ list_add_tail(&req->list, &net->nft.module_list);
+
+ return -EAGAIN;
}
#endif
@@ -617,10 +640,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (autoload) {
- nft_request_module(net, "nft-chain-%u-%.*s", family,
- nla_len(nla), (const char *)nla_data(nla));
- type = __nf_tables_chain_type_lookup(nla, family);
- if (type != NULL)
+ if (nft_request_module(net, "nft-chain-%u-%.*s", family,
+ nla_len(nla),
+ (const char *)nla_data(nla)) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
@@ -1162,11 +1184,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
void nft_register_chain_type(const struct nft_chain_type *ctype)
{
- if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
- return;
-
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
+ if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return;
}
@@ -1386,6 +1405,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
lockdep_commit_lock_is_held(net));
if (nft_dump_stats(skb, stats))
goto nla_put_failure;
+
+ if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
+ nla_put_be32(skb, NFTA_CHAIN_FLAGS,
+ htonl(NFT_CHAIN_HW_OFFLOAD)))
+ goto nla_put_failure;
}
if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
@@ -1768,7 +1792,10 @@ static int nft_chain_parse_hook(struct net *net,
hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
- type = chain_type[family][NFT_CHAIN_T_DEFAULT];
+ type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
+ if (!type)
+ return -EOPNOTSUPP;
+
if (nla[NFTA_CHAIN_TYPE]) {
type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
family, autoload);
@@ -2328,9 +2355,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
static int nft_expr_type_request_module(struct net *net, u8 family,
struct nlattr *nla)
{
- nft_request_module(net, "nft-expr-%u-%.*s", family,
- nla_len(nla), (char *)nla_data(nla));
- if (__nft_expr_type_get(family, nla))
+ if (nft_request_module(net, "nft-expr-%u-%.*s", family,
+ nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
return -EAGAIN;
return 0;
@@ -2356,9 +2382,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
return ERR_PTR(-EAGAIN);
- nft_request_module(net, "nft-expr-%.*s",
- nla_len(nla), (char *)nla_data(nla));
- if (__nft_expr_type_get(family, nla))
+ if (nft_request_module(net, "nft-expr-%.*s",
+ nla_len(nla),
+ (char *)nla_data(nla)) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
@@ -2449,9 +2475,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
err = PTR_ERR(ops);
#ifdef CONFIG_MODULES
if (err == -EAGAIN)
- nft_expr_type_request_module(ctx->net,
- ctx->family,
- tb[NFTA_EXPR_NAME]);
+ if (nft_expr_type_request_module(ctx->net,
+ ctx->family,
+ tb[NFTA_EXPR_NAME]) != -EAGAIN)
+ err = -ENOENT;
#endif
goto err1;
}
@@ -3288,8 +3315,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (list_empty(&nf_tables_set_types)) {
- nft_request_module(ctx->net, "nft-set");
- if (!list_empty(&nf_tables_set_types))
+ if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
@@ -3370,6 +3396,7 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
[NFTA_SET_DESC_SIZE] = { .type = NLA_U32 },
+ [NFTA_SET_DESC_CONCAT] = { .type = NLA_NESTED },
};
static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
@@ -3536,6 +3563,33 @@ static __be64 nf_jiffies64_to_msecs(u64 input)
return cpu_to_be64(jiffies64_to_msecs(input));
}
+static int nf_tables_fill_set_concat(struct sk_buff *skb,
+ const struct nft_set *set)
+{
+ struct nlattr *concat, *field;
+ int i;
+
+ concat = nla_nest_start_noflag(skb, NFTA_SET_DESC_CONCAT);
+ if (!concat)
+ return -ENOMEM;
+
+ for (i = 0; i < set->field_count; i++) {
+ field = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
+ if (!field)
+ return -ENOMEM;
+
+ if (nla_put_be32(skb, NFTA_SET_FIELD_LEN,
+ htonl(set->field_len[i])))
+ return -ENOMEM;
+
+ nla_nest_end(skb, field);
+ }
+
+ nla_nest_end(skb, concat);
+
+ return 0;
+}
+
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
const struct nft_set *set, u16 event, u16 flags)
{
@@ -3599,11 +3653,17 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
desc = nla_nest_start_noflag(skb, NFTA_SET_DESC);
+
if (desc == NULL)
goto nla_put_failure;
if (set->size &&
nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
goto nla_put_failure;
+
+ if (set->field_count > 1 &&
+ nf_tables_fill_set_concat(skb, set))
+ goto nla_put_failure;
+
nla_nest_end(skb, desc);
nlmsg_end(skb, nlh);
@@ -3776,6 +3836,53 @@ err:
return err;
}
+static const struct nla_policy nft_concat_policy[NFTA_SET_FIELD_MAX + 1] = {
+ [NFTA_SET_FIELD_LEN] = { .type = NLA_U32 },
+};
+
+static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ struct nft_set_desc *desc)
+{
+ struct nlattr *tb[NFTA_SET_FIELD_MAX + 1];
+ u32 len;
+ int err;
+
+ err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
+ nft_concat_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_SET_FIELD_LEN])
+ return -EINVAL;
+
+ len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
+
+ if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
+ return -E2BIG;
+
+ desc->field_len[desc->field_count++] = len;
+
+ return 0;
+}
+
+static int nft_set_desc_concat(struct nft_set_desc *desc,
+ const struct nlattr *nla)
+{
+ struct nlattr *attr;
+ int rem, err;
+
+ nla_for_each_nested(attr, nla, rem) {
+ if (nla_type(attr) != NFTA_LIST_ELEM)
+ return -EINVAL;
+
+ err = nft_set_desc_concat_parse(attr, desc);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
const struct nlattr *nla)
{
@@ -3789,8 +3896,10 @@ static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
if (da[NFTA_SET_DESC_SIZE] != NULL)
desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));
+ if (da[NFTA_SET_DESC_CONCAT])
+ err = nft_set_desc_concat(desc, da[NFTA_SET_DESC_CONCAT]);
- return 0;
+ return err;
}
static int nf_tables_newset(struct net *net, struct sock *nlsk,
@@ -3813,6 +3922,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
unsigned char *udata;
u16 udlen;
int err;
+ int i;
if (nla[NFTA_SET_TABLE] == NULL ||
nla[NFTA_SET_NAME] == NULL ||
@@ -3991,6 +4101,10 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
set->gc_int = gc_int;
set->handle = nf_tables_alloc_handle(table);
+ set->field_count = desc.field_count;
+ for (i = 0; i < desc.field_count; i++)
+ set->field_len[i] = desc.field_len[i];
+
err = ops->init(set, &desc, nla);
if (err < 0)
goto err3;
@@ -4194,6 +4308,9 @@ const struct nft_set_ext_type nft_set_ext_types[] = {
.len = sizeof(struct nft_userdata),
.align = __alignof__(struct nft_userdata),
},
+ [NFT_SET_EXT_KEY_END] = {
+ .align = __alignof__(u32),
+ },
};
EXPORT_SYMBOL_GPL(nft_set_ext_types);
@@ -4212,6 +4329,7 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
[NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING,
.len = NFT_OBJ_MAXNAMELEN - 1 },
+ [NFTA_SET_ELEM_KEY_END] = { .type = NLA_NESTED },
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -4261,6 +4379,11 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
NFT_DATA_VALUE, set->klen) < 0)
goto nla_put_failure;
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END) &&
+ nft_data_dump(skb, NFTA_SET_ELEM_KEY_END, nft_set_ext_key_end(ext),
+ NFT_DATA_VALUE, set->klen) < 0)
+ goto nla_put_failure;
+
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
@@ -4503,11 +4626,28 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
return 0;
}
+static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_data *key, struct nlattr *attr)
+{
+ struct nft_data_desc desc;
+ int err;
+
+ err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr);
+ if (err < 0)
+ return err;
+
+ if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
+ nft_data_release(key, desc.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
- struct nft_data_desc desc;
struct nft_set_elem elem;
struct sk_buff *skb;
uint32_t flags = 0;
@@ -4526,15 +4666,16 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (err < 0)
return err;
- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
- nla[NFTA_SET_ELEM_KEY]);
+ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
return err;
- err = -EINVAL;
- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
- nft_data_release(&elem.key.val, desc.type);
- return err;
+ if (nla[NFTA_SET_ELEM_KEY_END]) {
+ err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
+ nla[NFTA_SET_ELEM_KEY_END]);
+ if (err < 0)
+ return err;
}
priv = set->ops->get(ctx->net, set, &elem, flags);
@@ -4662,8 +4803,8 @@ static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
- const u32 *key, const u32 *data,
- u64 timeout, u64 expiration, gfp_t gfp)
+ const u32 *key, const u32 *key_end,
+ const u32 *data, u64 timeout, u64 expiration, gfp_t gfp)
{
struct nft_set_ext *ext;
void *elem;
@@ -4676,6 +4817,8 @@ void *nft_set_elem_init(const struct nft_set *set,
nft_set_ext_init(ext, tmpl);
memcpy(nft_set_ext_key(ext), key, set->klen);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
+ memcpy(nft_set_ext_key_end(ext), key_end, set->klen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
memcpy(nft_set_ext_data(ext), data, set->dlen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
@@ -4735,13 +4878,13 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
u8 genmask = nft_genmask_next(ctx->net);
- struct nft_data_desc d1, d2;
struct nft_set_ext_tmpl tmpl;
struct nft_set_ext *ext, *ext2;
struct nft_set_elem elem;
struct nft_set_binding *binding;
struct nft_object *obj = NULL;
struct nft_userdata *udata;
+ struct nft_data_desc desc;
struct nft_data data;
enum nft_registers dreg;
struct nft_trans *trans;
@@ -4807,15 +4950,22 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
}
- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &d1,
- nla[NFTA_SET_ELEM_KEY]);
+ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
- goto err1;
- err = -EINVAL;
- if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
- goto err2;
+ return err;
+
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+
+ if (nla[NFTA_SET_ELEM_KEY_END]) {
+ err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
+ nla[NFTA_SET_ELEM_KEY_END]);
+ if (err < 0)
+ goto err_parse_key;
+
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+ }
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, d1.len);
if (timeout > 0) {
nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
if (timeout != set->timeout)
@@ -4825,27 +4975,27 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
if (!(set->flags & NFT_SET_OBJECT)) {
err = -EINVAL;
- goto err2;
+ goto err_parse_key_end;
}
obj = nft_obj_lookup(ctx->net, ctx->table,
nla[NFTA_SET_ELEM_OBJREF],
set->objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto err2;
+ goto err_parse_key_end;
}
nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
}
if (nla[NFTA_SET_ELEM_DATA] != NULL) {
- err = nft_data_init(ctx, &data, sizeof(data), &d2,
+ err = nft_data_init(ctx, &data, sizeof(data), &desc,
nla[NFTA_SET_ELEM_DATA]);
if (err < 0)
- goto err2;
+ goto err_parse_key_end;
err = -EINVAL;
- if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
- goto err3;
+ if (set->dtype != NFT_DATA_VERDICT && desc.len != set->dlen)
+ goto err_parse_data;
dreg = nft_type_to_reg(set->dtype);
list_for_each_entry(binding, &set->bindings, list) {
@@ -4861,18 +5011,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = nft_validate_register_store(&bind_ctx, dreg,
&data,
- d2.type, d2.len);
+ desc.type, desc.len);
if (err < 0)
- goto err3;
+ goto err_parse_data;
- if (d2.type == NFT_DATA_VERDICT &&
+ if (desc.type == NFT_DATA_VERDICT &&
(data.verdict.code == NFT_GOTO ||
data.verdict.code == NFT_JUMP))
nft_validate_state_update(ctx->net,
NFT_VALIDATE_NEED);
}
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, d2.len);
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
}
/* The full maximum length of userdata can exceed the maximum
@@ -4888,10 +5038,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
err = -ENOMEM;
- elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, data.data,
+ elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
+ elem.key_end.val.data, data.data,
timeout, expiration, GFP_KERNEL);
if (elem.priv == NULL)
- goto err3;
+ goto err_parse_data;
ext = nft_set_elem_ext(set, elem.priv);
if (flags)
@@ -4908,7 +5059,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL)
- goto err4;
+ goto err_trans;
ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
err = set->ops->insert(ctx->net, set, &elem, &ext2);
@@ -4919,7 +5070,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) {
err = -EBUSY;
- goto err5;
+ goto err_element_clash;
}
if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
@@ -4932,33 +5083,35 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
else if (!(nlmsg_flags & NLM_F_EXCL))
err = 0;
}
- goto err5;
+ goto err_element_clash;
}
if (set->size &&
!atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
err = -ENFILE;
- goto err6;
+ goto err_set_full;
}
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
-err6:
+err_set_full:
set->ops->remove(ctx->net, set, &elem);
-err5:
+err_element_clash:
kfree(trans);
-err4:
+err_trans:
if (obj)
obj->use--;
kfree(elem.priv);
-err3:
+err_parse_data:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
- nft_data_release(&data, d2.type);
-err2:
- nft_data_release(&elem.key.val, d1.type);
-err1:
+ nft_data_release(&data, desc.type);
+err_parse_key_end:
+ nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
+err_parse_key:
+ nft_data_release(&elem.key.val, NFT_DATA_VALUE);
+
return err;
}
@@ -5053,7 +5206,6 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_set_ext_tmpl tmpl;
- struct nft_data_desc desc;
struct nft_set_elem elem;
struct nft_set_ext *ext;
struct nft_trans *trans;
@@ -5064,11 +5216,10 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
if (err < 0)
- goto err1;
+ return err;
- err = -EINVAL;
if (nla[NFTA_SET_ELEM_KEY] == NULL)
- goto err1;
+ return -EINVAL;
nft_set_ext_prepare(&tmpl);
@@ -5078,37 +5229,41 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
if (flags != 0)
nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
- nla[NFTA_SET_ELEM_KEY]);
+ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
- goto err1;
+ return err;
- err = -EINVAL;
- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
- goto err2;
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+
+ if (nla[NFTA_SET_ELEM_KEY_END]) {
+ err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
+ nla[NFTA_SET_ELEM_KEY_END]);
+ if (err < 0)
+ return err;
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, desc.len);
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+ }
err = -ENOMEM;
- elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, NULL, 0,
- 0, GFP_KERNEL);
+ elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
+ elem.key_end.val.data, NULL, 0, 0,
+ GFP_KERNEL);
if (elem.priv == NULL)
- goto err2;
+ goto fail_elem;
ext = nft_set_elem_ext(set, elem.priv);
if (flags)
*nft_set_ext_flags(ext) = flags;
trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
- if (trans == NULL) {
- err = -ENOMEM;
- goto err3;
- }
+ if (trans == NULL)
+ goto fail_trans;
priv = set->ops->deactivate(ctx->net, set, &elem);
if (priv == NULL) {
err = -ENOENT;
- goto err4;
+ goto fail_ops;
}
kfree(elem.priv);
elem.priv = priv;
@@ -5119,13 +5274,12 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
-err4:
+fail_ops:
kfree(trans);
-err3:
+fail_trans:
kfree(elem.priv);
-err2:
- nft_data_release(&elem.key.val, desc.type);
-err1:
+fail_elem:
+ nft_data_release(&elem.key.val, NFT_DATA_VALUE);
return err;
}
@@ -5415,8 +5569,7 @@ nft_obj_type_get(struct net *net, u32 objtype)
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
- nft_request_module(net, "nft-obj-%u", objtype);
- if (__nft_obj_type_get(objtype))
+ if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
@@ -5989,8 +6142,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
- nft_request_module(net, "nf-flowtable-%u", family);
- if (__nft_flowtable_type_get(family))
+ if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
@@ -6153,8 +6305,13 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
goto err4;
err = nft_register_flowtable_net_hooks(ctx.net, table, flowtable);
- if (err < 0)
+ if (err < 0) {
+ list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+ list_del_rcu(&hook->list);
+ kfree_rcu(hook, rcu);
+ }
goto err4;
+ }
err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
if (err < 0)
@@ -6992,6 +7149,18 @@ static void nft_chain_del(struct nft_chain *chain)
list_del_rcu(&chain->list);
}
+static void nf_tables_module_autoload_cleanup(struct net *net)
+{
+ struct nft_module_request *req, *next;
+
+ WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+ list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
+ WARN_ON_ONCE(!req->done);
+ list_del(&req->list);
+ kfree(req);
+ }
+}
+
static void nf_tables_commit_release(struct net *net)
{
struct nft_trans *trans;
@@ -7004,6 +7173,7 @@ static void nf_tables_commit_release(struct net *net)
* to prevent expensive synchronize_rcu() in commit phase.
*/
if (list_empty(&net->nft.commit_list)) {
+ nf_tables_module_autoload_cleanup(net);
mutex_unlock(&net->nft.commit_mutex);
return;
}
@@ -7018,6 +7188,7 @@ static void nf_tables_commit_release(struct net *net)
list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
spin_unlock(&nf_tables_destroy_list_lock);
+ nf_tables_module_autoload_cleanup(net);
mutex_unlock(&net->nft.commit_mutex);
schedule_work(&trans_destroy_work);
@@ -7209,6 +7380,21 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
return 0;
}
+static void nf_tables_module_autoload(struct net *net)
+{
+ struct nft_module_request *req, *next;
+ LIST_HEAD(module_list);
+
+ list_splice_init(&net->nft.module_list, &module_list);
+ mutex_unlock(&net->nft.commit_mutex);
+ list_for_each_entry_safe(req, next, &module_list, list) {
+ request_module("%s", req->module);
+ req->done = true;
+ }
+ mutex_lock(&net->nft.commit_mutex);
+ list_splice(&module_list, &net->nft.module_list);
+}
+
static void nf_tables_abort_release(struct nft_trans *trans)
{
switch (trans->msg_type) {
@@ -7238,7 +7424,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
kfree(trans);
}
-static int __nf_tables_abort(struct net *net)
+static int __nf_tables_abort(struct net *net, bool autoload)
{
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
@@ -7360,6 +7546,11 @@ static int __nf_tables_abort(struct net *net)
nf_tables_abort_release(trans);
}
+ if (autoload)
+ nf_tables_module_autoload(net);
+ else
+ nf_tables_module_autoload_cleanup(net);
+
return 0;
}
@@ -7368,9 +7559,9 @@ static void nf_tables_cleanup(struct net *net)
nft_validate_state_update(net, NFT_VALIDATE_SKIP);
}
-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
{
- int ret = __nf_tables_abort(net);
+ int ret = __nf_tables_abort(net, autoload);
mutex_unlock(&net->nft.commit_mutex);
@@ -7965,6 +8156,7 @@ static int __net_init nf_tables_init_net(struct net *net)
{
INIT_LIST_HEAD(&net->nft.tables);
INIT_LIST_HEAD(&net->nft.commit_list);
+ INIT_LIST_HEAD(&net->nft.module_list);
mutex_init(&net->nft.commit_mutex);
net->nft.base_seq = 1;
net->nft.validate_state = NFT_VALIDATE_SKIP;
@@ -7976,10 +8168,11 @@ static void __net_exit nf_tables_exit_net(struct net *net)
{
mutex_lock(&net->nft.commit_mutex);
if (!list_empty(&net->nft.commit_list))
- __nf_tables_abort(net);
+ __nf_tables_abort(net, false);
__nft_release_tables(net);
mutex_unlock(&net->nft.commit_mutex);
WARN_ON_ONCE(!list_empty(&net->nft.tables));
+ WARN_ON_ONCE(!list_empty(&net->nft.module_list));
}
static struct pernet_operations nf_tables_net_ops = {
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index a9ea29afb09f..2bb28483af22 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -564,7 +564,7 @@ static void nft_indr_block_cb(struct net_device *dev,
mutex_lock(&net->nft.commit_mutex);
chain = __nft_offload_get_chain(dev);
- if (chain) {
+ if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
struct nft_base_chain *basechain;
basechain = nft_base_chain(chain);
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c
index a9fce8d10051..586b621007eb 100644
--- a/net/netfilter/nf_tables_set_core.c
+++ b/net/netfilter/nf_tables_set_core.c
@@ -9,12 +9,14 @@ static int __init nf_tables_set_module_init(void)
nft_register_set(&nft_set_rhash_type);
nft_register_set(&nft_set_bitmap_type);
nft_register_set(&nft_set_rbtree_type);
+ nft_register_set(&nft_set_pipapo_type);
return 0;
}
static void __exit nf_tables_set_module_exit(void)
{
+ nft_unregister_set(&nft_set_pipapo_type);
nft_unregister_set(&nft_set_rbtree_type);
nft_unregister_set(&nft_set_bitmap_type);
nft_unregister_set(&nft_set_rhash_type);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 4abbb452cf6c..99127e2d95a8 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -476,7 +476,7 @@ ack:
}
done:
if (status & NFNL_BATCH_REPLAY) {
- ss->abort(net, oskb);
+ ss->abort(net, oskb, true);
nfnl_err_reset(&err_list);
kfree_skb(skb);
module_put(ss->owner);
@@ -487,11 +487,11 @@ done:
status |= NFNL_BATCH_REPLAY;
goto done;
} else if (err) {
- ss->abort(net, oskb);
+ ss->abort(net, oskb, false);
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
}
} else {
- ss->abort(net, oskb);
+ ss->abort(net, oskb, false);
}
if (ss->cleanup)
ss->cleanup(net);
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index de3a9596b7f1..a5f294aa8e4c 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -742,6 +742,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
[NFCTH_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN-1 },
[NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+ [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
+ [NFCTH_STATUS] = { .type = NLA_U32, },
};
static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index feabdfb22920..76535fd9278c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -778,7 +778,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
unsigned int queued;
struct nfqnl_instance *queue;
- struct sk_buff *skb, *segs;
+ struct sk_buff *skb, *segs, *nskb;
int err = -ENOBUFS;
struct net *net = entry->state.net;
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
@@ -815,8 +815,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
goto out_err;
queued = 0;
err = 0;
- do {
- struct sk_buff *nskb = segs->next;
+ skb_list_walk_safe(segs, segs, nskb) {
if (err == 0)
err = __nfqnl_enqueue_packet_gso(net, queue,
segs, entry);
@@ -824,8 +823,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
queued++;
else
kfree_skb(segs);
- segs = nskb;
- } while (segs);
+ }
if (queued) {
if (err) /* some segments are already queued */
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 10e9d50e4e19..0ed2281f03be 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -18,21 +18,66 @@
struct nft_bitwise {
enum nft_registers sreg:8;
enum nft_registers dreg:8;
+ enum nft_bitwise_ops op:8;
u8 len;
struct nft_data mask;
struct nft_data xor;
+ struct nft_data data;
};
+static void nft_bitwise_eval_bool(u32 *dst, const u32 *src,
+ const struct nft_bitwise *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++)
+ dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i];
+}
+
+static void nft_bitwise_eval_lshift(u32 *dst, const u32 *src,
+ const struct nft_bitwise *priv)
+{
+ u32 shift = priv->data.data[0];
+ unsigned int i;
+ u32 carry = 0;
+
+ for (i = DIV_ROUND_UP(priv->len, sizeof(u32)); i > 0; i--) {
+ dst[i - 1] = (src[i - 1] << shift) | carry;
+ carry = src[i - 1] >> (BITS_PER_TYPE(u32) - shift);
+ }
+}
+
+static void nft_bitwise_eval_rshift(u32 *dst, const u32 *src,
+ const struct nft_bitwise *priv)
+{
+ u32 shift = priv->data.data[0];
+ unsigned int i;
+ u32 carry = 0;
+
+ for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++) {
+ dst[i] = carry | (src[i] >> shift);
+ carry = src[i] << (BITS_PER_TYPE(u32) - shift);
+ }
+}
+
void nft_bitwise_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
const u32 *src = &regs->data[priv->sreg];
u32 *dst = &regs->data[priv->dreg];
- unsigned int i;
- for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++)
- dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i];
+ switch (priv->op) {
+ case NFT_BITWISE_BOOL:
+ nft_bitwise_eval_bool(dst, src, priv);
+ break;
+ case NFT_BITWISE_LSHIFT:
+ nft_bitwise_eval_lshift(dst, src, priv);
+ break;
+ case NFT_BITWISE_RSHIFT:
+ nft_bitwise_eval_rshift(dst, src, priv);
+ break;
+ }
}
static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
@@ -41,40 +86,22 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
[NFTA_BITWISE_LEN] = { .type = NLA_U32 },
[NFTA_BITWISE_MASK] = { .type = NLA_NESTED },
[NFTA_BITWISE_XOR] = { .type = NLA_NESTED },
+ [NFTA_BITWISE_OP] = { .type = NLA_U32 },
+ [NFTA_BITWISE_DATA] = { .type = NLA_NESTED },
};
-static int nft_bitwise_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_bitwise_init_bool(struct nft_bitwise *priv,
+ const struct nlattr *const tb[])
{
- struct nft_bitwise *priv = nft_expr_priv(expr);
struct nft_data_desc d1, d2;
- u32 len;
int err;
- if (tb[NFTA_BITWISE_SREG] == NULL ||
- tb[NFTA_BITWISE_DREG] == NULL ||
- tb[NFTA_BITWISE_LEN] == NULL ||
- tb[NFTA_BITWISE_MASK] == NULL ||
- tb[NFTA_BITWISE_XOR] == NULL)
+ if (tb[NFTA_BITWISE_DATA])
return -EINVAL;
- err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
- if (err < 0)
- return err;
-
- priv->len = len;
-
- priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
- err = nft_validate_register_load(priv->sreg, priv->len);
- if (err < 0)
- return err;
-
- priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
- err = nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, priv->len);
- if (err < 0)
- return err;
+ if (!tb[NFTA_BITWISE_MASK] ||
+ !tb[NFTA_BITWISE_XOR])
+ return -EINVAL;
err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &d1,
tb[NFTA_BITWISE_MASK]);
@@ -102,40 +129,151 @@ err1:
return err;
}
-static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_bitwise_init_shift(struct nft_bitwise *priv,
+ const struct nlattr *const tb[])
{
- const struct nft_bitwise *priv = nft_expr_priv(expr);
+ struct nft_data_desc d;
+ int err;
- if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
- goto nla_put_failure;
- if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
- goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
- goto nla_put_failure;
+ if (tb[NFTA_BITWISE_MASK] ||
+ tb[NFTA_BITWISE_XOR])
+ return -EINVAL;
+
+ if (!tb[NFTA_BITWISE_DATA])
+ return -EINVAL;
+ err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &d,
+ tb[NFTA_BITWISE_DATA]);
+ if (err < 0)
+ return err;
+ if (d.type != NFT_DATA_VALUE || d.len != sizeof(u32) ||
+ priv->data.data[0] >= BITS_PER_TYPE(u32)) {
+ nft_data_release(&priv->data, d.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nft_bitwise_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_bitwise *priv = nft_expr_priv(expr);
+ u32 len;
+ int err;
+
+ if (!tb[NFTA_BITWISE_SREG] ||
+ !tb[NFTA_BITWISE_DREG] ||
+ !tb[NFTA_BITWISE_LEN])
+ return -EINVAL;
+
+ err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+
+ priv->len = len;
+
+ priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
+ err = nft_validate_register_load(priv->sreg, priv->len);
+ if (err < 0)
+ return err;
+
+ priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
+ err = nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, priv->len);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_BITWISE_OP]) {
+ priv->op = ntohl(nla_get_be32(tb[NFTA_BITWISE_OP]));
+ switch (priv->op) {
+ case NFT_BITWISE_BOOL:
+ case NFT_BITWISE_LSHIFT:
+ case NFT_BITWISE_RSHIFT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ } else {
+ priv->op = NFT_BITWISE_BOOL;
+ }
+
+ switch(priv->op) {
+ case NFT_BITWISE_BOOL:
+ err = nft_bitwise_init_bool(priv, tb);
+ break;
+ case NFT_BITWISE_LSHIFT:
+ case NFT_BITWISE_RSHIFT:
+ err = nft_bitwise_init_shift(priv, tb);
+ break;
+ }
+
+ return err;
+}
+
+static int nft_bitwise_dump_bool(struct sk_buff *skb,
+ const struct nft_bitwise *priv)
+{
if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
NFT_DATA_VALUE, priv->len) < 0)
- goto nla_put_failure;
+ return -1;
if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
NFT_DATA_VALUE, priv->len) < 0)
- goto nla_put_failure;
+ return -1;
return 0;
+}
+
+static int nft_bitwise_dump_shift(struct sk_buff *skb,
+ const struct nft_bitwise *priv)
+{
+ if (nft_data_dump(skb, NFTA_BITWISE_DATA, &priv->data,
+ NFT_DATA_VALUE, sizeof(u32)) < 0)
+ return -1;
+ return 0;
+}
+
+static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_bitwise *priv = nft_expr_priv(expr);
+ int err = 0;
+
+ if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
+ return -1;
+ if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
+ return -1;
+ if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
+ return -1;
+ if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(priv->op)))
+ return -1;
+
+ switch (priv->op) {
+ case NFT_BITWISE_BOOL:
+ err = nft_bitwise_dump_bool(skb, priv);
+ break;
+ case NFT_BITWISE_LSHIFT:
+ case NFT_BITWISE_RSHIFT:
+ err = nft_bitwise_dump_shift(skb, priv);
+ break;
+ }
-nla_put_failure:
- return -1;
+ return err;
}
static struct nft_data zero;
static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
- struct nft_flow_rule *flow,
- const struct nft_expr *expr)
+ struct nft_flow_rule *flow,
+ const struct nft_expr *expr)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
+ if (priv->op != NFT_BITWISE_BOOL)
+ return -EOPNOTSUPP;
+
if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
priv->sreg != priv->dreg || priv->len != reg->len)
return -EOPNOTSUPP;
diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
index ff9ac8ae0031..eac4a901233f 100644
--- a/net/netfilter/nft_chain_nat.c
+++ b/net/netfilter/nft_chain_nat.c
@@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
.family = NFPROTO_INET,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 8887295414dc..683785225a3e 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -54,7 +54,7 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
timeout = priv->timeout ? : set->timeout;
elem = nft_set_elem_init(set, &priv->tmpl,
- &regs->data[priv->sreg_key],
+ &regs->data[priv->sreg_key], NULL,
&regs->data[priv->sreg_data],
timeout, 0, GFP_ATOMIC);
if (elem == NULL)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 9740b554fdb3..951b6e87ed5d 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -17,6 +17,7 @@
#include <linux/smp.h>
#include <linux/static_key.h>
#include <net/dst.h>
+#include <net/ip.h>
#include <net/sock.h>
#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
#include <net/netfilter/nf_tables.h>
@@ -33,8 +34,9 @@
static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
-static u8 nft_meta_weekday(time64_t secs)
+static u8 nft_meta_weekday(void)
{
+ time64_t secs = ktime_get_real_seconds();
unsigned int dse;
u8 wday;
@@ -56,14 +58,264 @@ static u32 nft_meta_hour(time64_t secs)
+ tm.tm_sec;
}
+static noinline_for_stack void
+nft_meta_get_eval_time(enum nft_meta_keys key,
+ u32 *dest)
+{
+ switch (key) {
+ case NFT_META_TIME_NS:
+ nft_reg_store64(dest, ktime_get_real_ns());
+ break;
+ case NFT_META_TIME_DAY:
+ nft_reg_store8(dest, nft_meta_weekday());
+ break;
+ case NFT_META_TIME_HOUR:
+ *dest = nft_meta_hour(ktime_get_real_seconds());
+ break;
+ default:
+ break;
+ }
+}
+
+static noinline bool
+nft_meta_get_eval_pkttype_lo(const struct nft_pktinfo *pkt,
+ u32 *dest)
+{
+ const struct sk_buff *skb = pkt->skb;
+
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ nft_reg_store8(dest, PACKET_MULTICAST);
+ else
+ nft_reg_store8(dest, PACKET_BROADCAST);
+ break;
+ case NFPROTO_IPV6:
+ nft_reg_store8(dest, PACKET_MULTICAST);
+ break;
+ case NFPROTO_NETDEV:
+ switch (skb->protocol) {
+ case htons(ETH_P_IP): {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph, _iph;
+
+ iph = skb_header_pointer(skb, noff,
+ sizeof(_iph), &_iph);
+ if (!iph)
+ return false;
+
+ if (ipv4_is_multicast(iph->daddr))
+ nft_reg_store8(dest, PACKET_MULTICAST);
+ else
+ nft_reg_store8(dest, PACKET_BROADCAST);
+
+ break;
+ }
+ case htons(ETH_P_IPV6):
+ nft_reg_store8(dest, PACKET_MULTICAST);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ return true;
+}
+
+static noinline bool
+nft_meta_get_eval_skugid(enum nft_meta_keys key,
+ u32 *dest,
+ const struct nft_pktinfo *pkt)
+{
+ struct sock *sk = skb_to_full_sk(pkt->skb);
+ struct socket *sock;
+
+ if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
+ return false;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ sock = sk->sk_socket;
+ if (!sock || !sock->file) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return false;
+ }
+
+ switch (key) {
+ case NFT_META_SKUID:
+ *dest = from_kuid_munged(&init_user_ns,
+ sock->file->f_cred->fsuid);
+ break;
+ case NFT_META_SKGID:
+ *dest = from_kgid_munged(&init_user_ns,
+ sock->file->f_cred->fsgid);
+ break;
+ default:
+ break;
+ }
+
+ read_unlock_bh(&sk->sk_callback_lock);
+ return true;
+}
+
+#ifdef CONFIG_CGROUP_NET_CLASSID
+static noinline bool
+nft_meta_get_eval_cgroup(u32 *dest, const struct nft_pktinfo *pkt)
+{
+ struct sock *sk = skb_to_full_sk(pkt->skb);
+
+ if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
+ return false;
+
+ *dest = sock_cgroup_classid(&sk->sk_cgrp_data);
+ return true;
+}
+#endif
+
+static noinline bool nft_meta_get_eval_kind(enum nft_meta_keys key,
+ u32 *dest,
+ const struct nft_pktinfo *pkt)
+{
+ const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
+
+ switch (key) {
+ case NFT_META_IIFKIND:
+ if (!in || !in->rtnl_link_ops)
+ return false;
+ strncpy((char *)dest, in->rtnl_link_ops->kind, IFNAMSIZ);
+ break;
+ case NFT_META_OIFKIND:
+ if (!out || !out->rtnl_link_ops)
+ return false;
+ strncpy((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void nft_meta_store_ifindex(u32 *dest, const struct net_device *dev)
+{
+ *dest = dev ? dev->ifindex : 0;
+}
+
+static void nft_meta_store_ifname(u32 *dest, const struct net_device *dev)
+{
+ strncpy((char *)dest, dev ? dev->name : "", IFNAMSIZ);
+}
+
+static bool nft_meta_store_iftype(u32 *dest, const struct net_device *dev)
+{
+ if (!dev)
+ return false;
+
+ nft_reg_store16(dest, dev->type);
+ return true;
+}
+
+static bool nft_meta_store_ifgroup(u32 *dest, const struct net_device *dev)
+{
+ if (!dev)
+ return false;
+
+ *dest = dev->group;
+ return true;
+}
+
+static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest,
+ const struct nft_pktinfo *pkt)
+{
+ switch (key) {
+ case NFT_META_IIFNAME:
+ nft_meta_store_ifname(dest, nft_in(pkt));
+ break;
+ case NFT_META_OIFNAME:
+ nft_meta_store_ifname(dest, nft_out(pkt));
+ break;
+ case NFT_META_IIF:
+ nft_meta_store_ifindex(dest, nft_in(pkt));
+ break;
+ case NFT_META_OIF:
+ nft_meta_store_ifindex(dest, nft_out(pkt));
+ break;
+ case NFT_META_IIFTYPE:
+ if (!nft_meta_store_iftype(dest, nft_in(pkt)))
+ return false;
+ break;
+ case NFT_META_OIFTYPE:
+ if (!nft_meta_store_iftype(dest, nft_out(pkt)))
+ return false;
+ break;
+ case NFT_META_IIFGROUP:
+ if (!nft_meta_store_ifgroup(dest, nft_in(pkt)))
+ return false;
+ break;
+ case NFT_META_OIFGROUP:
+ if (!nft_meta_store_ifgroup(dest, nft_out(pkt)))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static noinline u32 nft_prandom_u32(void)
+{
+ struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
+
+ return prandom_u32_state(state);
+}
+
+#ifdef CONFIG_IP_ROUTE_CLASSID
+static noinline bool
+nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest)
+{
+ const struct dst_entry *dst = skb_dst(skb);
+
+ if (!dst)
+ return false;
+
+ *dest = dst->tclassid;
+ return true;
+}
+#endif
+
+static noinline u32 nft_meta_get_eval_sdif(const struct nft_pktinfo *pkt)
+{
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ return inet_sdif(pkt->skb);
+ case NFPROTO_IPV6:
+ return inet6_sdif(pkt->skb);
+ }
+
+ return 0;
+}
+
+static noinline void
+nft_meta_get_eval_sdifname(u32 *dest, const struct nft_pktinfo *pkt)
+{
+ u32 sdif = nft_meta_get_eval_sdif(pkt);
+ const struct net_device *dev;
+
+ dev = sdif ? dev_get_by_index_rcu(nft_net(pkt), sdif) : NULL;
+ nft_meta_store_ifname(dest, dev);
+}
+
void nft_meta_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
- const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
- struct sock *sk;
u32 *dest = &regs->data[priv->dreg];
switch (priv->key) {
@@ -88,69 +340,26 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*dest = skb->mark;
break;
case NFT_META_IIF:
- *dest = in ? in->ifindex : 0;
- break;
case NFT_META_OIF:
- *dest = out ? out->ifindex : 0;
- break;
case NFT_META_IIFNAME:
- strncpy((char *)dest, in ? in->name : "", IFNAMSIZ);
- break;
case NFT_META_OIFNAME:
- strncpy((char *)dest, out ? out->name : "", IFNAMSIZ);
- break;
case NFT_META_IIFTYPE:
- if (in == NULL)
- goto err;
- nft_reg_store16(dest, in->type);
- break;
case NFT_META_OIFTYPE:
- if (out == NULL)
+ case NFT_META_IIFGROUP:
+ case NFT_META_OIFGROUP:
+ if (!nft_meta_get_eval_ifname(priv->key, dest, pkt))
goto err;
- nft_reg_store16(dest, out->type);
break;
case NFT_META_SKUID:
- sk = skb_to_full_sk(skb);
- if (!sk || !sk_fullsock(sk) ||
- !net_eq(nft_net(pkt), sock_net(sk)))
- goto err;
-
- read_lock_bh(&sk->sk_callback_lock);
- if (sk->sk_socket == NULL ||
- sk->sk_socket->file == NULL) {
- read_unlock_bh(&sk->sk_callback_lock);
- goto err;
- }
-
- *dest = from_kuid_munged(&init_user_ns,
- sk->sk_socket->file->f_cred->fsuid);
- read_unlock_bh(&sk->sk_callback_lock);
- break;
case NFT_META_SKGID:
- sk = skb_to_full_sk(skb);
- if (!sk || !sk_fullsock(sk) ||
- !net_eq(nft_net(pkt), sock_net(sk)))
+ if (!nft_meta_get_eval_skugid(priv->key, dest, pkt))
goto err;
-
- read_lock_bh(&sk->sk_callback_lock);
- if (sk->sk_socket == NULL ||
- sk->sk_socket->file == NULL) {
- read_unlock_bh(&sk->sk_callback_lock);
- goto err;
- }
- *dest = from_kgid_munged(&init_user_ns,
- sk->sk_socket->file->f_cred->fsgid);
- read_unlock_bh(&sk->sk_callback_lock);
break;
#ifdef CONFIG_IP_ROUTE_CLASSID
- case NFT_META_RTCLASSID: {
- const struct dst_entry *dst = skb_dst(skb);
-
- if (dst == NULL)
+ case NFT_META_RTCLASSID:
+ if (!nft_meta_get_eval_rtclassid(skb, dest))
goto err;
- *dest = dst->tclassid;
break;
- }
#endif
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
@@ -163,97 +372,41 @@ void nft_meta_get_eval(const struct nft_expr *expr,
break;
}
- switch (nft_pf(pkt)) {
- case NFPROTO_IPV4:
- if (ipv4_is_multicast(ip_hdr(skb)->daddr))
- nft_reg_store8(dest, PACKET_MULTICAST);
- else
- nft_reg_store8(dest, PACKET_BROADCAST);
- break;
- case NFPROTO_IPV6:
- nft_reg_store8(dest, PACKET_MULTICAST);
- break;
- case NFPROTO_NETDEV:
- switch (skb->protocol) {
- case htons(ETH_P_IP): {
- int noff = skb_network_offset(skb);
- struct iphdr *iph, _iph;
-
- iph = skb_header_pointer(skb, noff,
- sizeof(_iph), &_iph);
- if (!iph)
- goto err;
-
- if (ipv4_is_multicast(iph->daddr))
- nft_reg_store8(dest, PACKET_MULTICAST);
- else
- nft_reg_store8(dest, PACKET_BROADCAST);
-
- break;
- }
- case htons(ETH_P_IPV6):
- nft_reg_store8(dest, PACKET_MULTICAST);
- break;
- default:
- WARN_ON_ONCE(1);
- goto err;
- }
- break;
- default:
- WARN_ON_ONCE(1);
+ if (!nft_meta_get_eval_pkttype_lo(pkt, dest))
goto err;
- }
break;
case NFT_META_CPU:
*dest = raw_smp_processor_id();
break;
- case NFT_META_IIFGROUP:
- if (in == NULL)
- goto err;
- *dest = in->group;
- break;
- case NFT_META_OIFGROUP:
- if (out == NULL)
- goto err;
- *dest = out->group;
- break;
#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
- sk = skb_to_full_sk(skb);
- if (!sk || !sk_fullsock(sk) ||
- !net_eq(nft_net(pkt), sock_net(sk)))
+ if (!nft_meta_get_eval_cgroup(dest, pkt))
goto err;
- *dest = sock_cgroup_classid(&sk->sk_cgrp_data);
break;
#endif
- case NFT_META_PRANDOM: {
- struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
- *dest = prandom_u32_state(state);
+ case NFT_META_PRANDOM:
+ *dest = nft_prandom_u32();
break;
- }
#ifdef CONFIG_XFRM
case NFT_META_SECPATH:
nft_reg_store8(dest, secpath_exists(skb));
break;
#endif
case NFT_META_IIFKIND:
- if (in == NULL || in->rtnl_link_ops == NULL)
- goto err;
- strncpy((char *)dest, in->rtnl_link_ops->kind, IFNAMSIZ);
- break;
case NFT_META_OIFKIND:
- if (out == NULL || out->rtnl_link_ops == NULL)
+ if (!nft_meta_get_eval_kind(priv->key, dest, pkt))
goto err;
- strncpy((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ);
break;
case NFT_META_TIME_NS:
- nft_reg_store64(dest, ktime_get_real_ns());
- break;
case NFT_META_TIME_DAY:
- nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds()));
- break;
case NFT_META_TIME_HOUR:
- *dest = nft_meta_hour(ktime_get_real_seconds());
+ nft_meta_get_eval_time(priv->key, dest);
+ break;
+ case NFT_META_SDIF:
+ *dest = nft_meta_get_eval_sdif(pkt);
+ break;
+ case NFT_META_SDIFNAME:
+ nft_meta_get_eval_sdifname(dest, pkt);
break;
default:
WARN_ON(1);
@@ -335,6 +488,7 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
case NFT_META_MARK:
case NFT_META_IIF:
case NFT_META_OIF:
+ case NFT_META_SDIF:
case NFT_META_SKUID:
case NFT_META_SKGID:
#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -356,6 +510,7 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
case NFT_META_OIFNAME:
case NFT_META_IIFKIND:
case NFT_META_OIFKIND:
+ case NFT_META_SDIFNAME:
len = IFNAMSIZ;
break;
case NFT_META_PRANDOM:
@@ -386,16 +541,28 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
}
EXPORT_SYMBOL_GPL(nft_meta_get_init);
-static int nft_meta_get_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+static int nft_meta_get_validate_sdif(const struct nft_ctx *ctx)
{
-#ifdef CONFIG_XFRM
- const struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
- if (priv->key != NFT_META_SECPATH)
- return 0;
+ switch (ctx->family) {
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ hooks = (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
+}
+
+static int nft_meta_get_validate_xfrm(const struct nft_ctx *ctx)
+{
+#ifdef CONFIG_XFRM
+ unsigned int hooks;
switch (ctx->family) {
case NFPROTO_NETDEV:
@@ -418,6 +585,25 @@ static int nft_meta_get_validate(const struct nft_ctx *ctx,
#endif
}
+static int nft_meta_get_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ const struct nft_meta *priv = nft_expr_priv(expr);
+
+ switch (priv->key) {
+ case NFT_META_SECPATH:
+ return nft_meta_get_validate_xfrm(ctx);
+ case NFT_META_SDIF:
+ case NFT_META_SDIFNAME:
+ return nft_meta_get_validate_sdif(ctx);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index f54d6ae15bb1..b42247aa48a9 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
int err;
u8 ttl;
+ if (!tb[NFTA_OSF_DREG])
+ return -EINVAL;
+
if (tb[NFTA_OSF_TTL]) {
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
if (ttl > 2)
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 1993af3a2979..a7de3a58f553 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -129,6 +129,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
[NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
+ [NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 },
};
static int nft_payload_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 087a056e34d1..87e8d9ba0c9b 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -259,8 +259,8 @@ static u64 nft_bitmap_privsize(const struct nlattr * const nla[],
}
static int nft_bitmap_init(const struct nft_set *set,
- const struct nft_set_desc *desc,
- const struct nlattr * const nla[])
+ const struct nft_set_desc *desc,
+ const struct nlattr * const nla[])
{
struct nft_bitmap *priv = nft_set_priv(set);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index b331a3c9a3a8..d350a7cd3af0 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -645,7 +645,7 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
}
static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features,
- struct nft_set_estimate *est)
+ struct nft_set_estimate *est)
{
if (!desc->size)
return false;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
new file mode 100644
index 000000000000..4fc0c924ed5d
--- /dev/null
+++ b/net/netfilter/nft_set_pipapo.c
@@ -0,0 +1,2104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* PIPAPO: PIle PAcket POlicies: set for arbitrary concatenations of ranges
+ *
+ * Copyright (c) 2019-2020 Red Hat GmbH
+ *
+ * Author: Stefano Brivio <sbrivio@redhat.com>
+ */
+
+/**
+ * DOC: Theory of Operation
+ *
+ *
+ * Problem
+ * -------
+ *
+ * Match packet bytes against entries composed of ranged or non-ranged packet
+ * field specifiers, mapping them to arbitrary references. For example:
+ *
+ * ::
+ *
+ * --- fields --->
+ * | [net],[port],[net]... => [reference]
+ * entries [net],[port],[net]... => [reference]
+ * | [net],[port],[net]... => [reference]
+ * V ...
+ *
+ * where [net] fields can be IP ranges or netmasks, and [port] fields are port
+ * ranges. Arbitrary packet fields can be matched.
+ *
+ *
+ * Algorithm Overview
+ * ------------------
+ *
+ * This algorithm is loosely inspired by [Ligatti 2010], and fundamentally
+ * relies on the consideration that every contiguous range in a space of b bits
+ * can be converted into b * 2 netmasks, from Theorem 3 in [Rottenstreich 2010],
+ * as also illustrated in Section 9 of [Kogan 2014].
+ *
+ * Classification against a number of entries that require matching given bits
+ * of a packet field is performed by grouping those bits in sets of arbitrary
+ * size, and classifying packet bits one group at a time.
+ *
+ * Example:
+ * to match the source port (16 bits) of a packet, we can divide those 16 bits
+ * into 4 groups of 4 bits each. Given the entry:
+ * 0000 0001 0101 1001
+ * and a packet with source port:
+ * 0000 0001 1010 1001
+ * first and second groups match, but the third doesn't. We conclude that the
+ * packet doesn't match the given entry.
+ *
+ * Translate the set to a sequence of lookup tables, one per field. Each table
+ * has two dimensions: bit groups to be matched for a single packet field, and
+ * all the possible values of said groups (buckets). Input entries are
+ * represented as one or more rules, depending on the number of composing
+ * netmasks for the given field specifier, and a group match is indicated as a
+ * set bit, with number corresponding to the rule index, in all the buckets
+ * whose value matches the entry for a given group.
+ *
+ * Rules are mapped between fields through an array of x, n pairs, with each
+ * item mapping a matched rule to one or more rules. The position of the pair in
+ * the array indicates the matched rule to be mapped to the next field, x
+ * indicates the first rule index in the next field, and n the amount of
+ * next-field rules the current rule maps to.
+ *
+ * The mapping array for the last field maps to the desired references.
+ *
+ * To match, we perform table lookups using the values of grouped packet bits,
+ * and use a sequence of bitwise operations to progressively evaluate rule
+ * matching.
+ *
+ * A stand-alone reference implementation, also including notes about possible
+ * future optimisations, is available at:
+ * https://pipapo.lameexcu.se/
+ *
+ * Insertion
+ * ---------
+ *
+ * - For each packet field:
+ *
+ * - divide the b packet bits we want to classify into groups of size t,
+ * obtaining ceil(b / t) groups
+ *
+ * Example: match on destination IP address, with t = 4: 32 bits, 8 groups
+ * of 4 bits each
+ *
+ * - allocate a lookup table with one column ("bucket") for each possible
+ * value of a group, and with one row for each group
+ *
+ * Example: 8 groups, 2^4 buckets:
+ *
+ * ::
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0
+ * 1
+ * 2
+ * 3
+ * 4
+ * 5
+ * 6
+ * 7
+ *
+ * - map the bits we want to classify for the current field, for a given
+ * entry, to a single rule for non-ranged and netmask set items, and to one
+ * or multiple rules for ranges. Ranges are expanded to composing netmasks
+ * by pipapo_expand().
+ *
+ * Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048
+ * - rule #0: 10.0.0.5
+ * - rule #1: 192.168.1.0/24
+ * - rule #2: 192.168.2.0/31
+ *
+ * - insert references to the rules in the lookup table, selecting buckets
+ * according to bit values of a rule in the given group. This is done by
+ * pipapo_insert().
+ *
+ * Example: given:
+ * - rule #0: 10.0.0.5 mapping to buckets
+ * < 0 10 0 0 0 0 0 5 >
+ * - rule #1: 192.168.1.0/24 mapping to buckets
+ * < 12 0 10 8 0 1 < 0..15 > < 0..15 > >
+ * - rule #2: 192.168.2.0/31 mapping to buckets
+ * < 12 0 10 8 0 2 0 < 0..1 > >
+ *
+ * these bits are set in the lookup table:
+ *
+ * ::
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0 1,2
+ * 1 1,2 0
+ * 2 0 1,2
+ * 3 0 1,2
+ * 4 0,1,2
+ * 5 0 1 2
+ * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
+ *
+ * - if this is not the last field in the set, fill a mapping array that maps
+ * rules from the lookup table to rules belonging to the same entry in
+ * the next lookup table, done by pipapo_map().
+ *
+ * Note that as rules map to contiguous ranges of rules, given how netmask
+ * expansion and insertion is performed, &union nft_pipapo_map_bucket stores
+ * this information as pairs of first rule index, rule count.
+ *
+ * Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048,
+ * given lookup table #0 for field 0 (see example above):
+ *
+ * ::
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0 1,2
+ * 1 1,2 0
+ * 2 0 1,2
+ * 3 0 1,2
+ * 4 0,1,2
+ * 5 0 1 2
+ * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
+ *
+ * and lookup table #1 for field 1 with:
+ * - rule #0: 1024 mapping to buckets
+ * < 0 0 4 0 >
+ * - rule #1: 2048 mapping to buckets
+ * < 0 0 5 0 >
+ *
+ * ::
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0,1
+ * 1 0,1
+ * 2 0 1
+ * 3 0,1
+ *
+ * we need to map rules for 10.0.0.5 in lookup table #0 (rule #0) to 1024
+ * in lookup table #1 (rule #0) and rules for 192.168.1.0-192.168.2.1
+ * (rules #1, #2) to 2048 in lookup table #1 (rule #1):
+ *
+ * ::
+ *
+ * rule indices in current field: 0 1 2
+ * map to rules in next field: 0 1 1
+ *
+ * - if this is the last field in the set, fill a mapping array that maps
+ * rules from the last lookup table to element pointers, also done by
+ * pipapo_map().
+ *
+ * Note that, in this implementation, we have two elements (start, end) for
+ * each entry. The pointer to the end element is stored in this array, and
+ * the pointer to the start element is linked from it.
+ *
+ * Example: entry 10.0.0.5:1024 has a corresponding &struct nft_pipapo_elem
+ * pointer, 0x66, and element for 192.168.1.0-192.168.2.1:2048 is at 0x42.
+ * From the rules of lookup table #1 as mapped above:
+ *
+ * ::
+ *
+ * rule indices in last field: 0 1
+ * map to elements: 0x66 0x42
+ *
+ *
+ * Matching
+ * --------
+ *
+ * We use a result bitmap, with the size of a single lookup table bucket, to
+ * represent the matching state that applies at every algorithm step. This is
+ * done by pipapo_lookup().
+ *
+ * - For each packet field:
+ *
+ * - start with an all-ones result bitmap (res_map in pipapo_lookup())
+ *
+ * - perform a lookup into the table corresponding to the current field,
+ * for each group, and at every group, AND the current result bitmap with
+ * the value from the lookup table bucket
+ *
+ * ::
+ *
+ * Example: 192.168.1.5 < 12 0 10 8 0 1 0 5 >, with lookup table from
+ * insertion examples.
+ * Lookup table buckets are at least 3 bits wide; we'll assume 8 bits for
+ * convenience in this example. The initial result bitmap is 0xff, and the steps
+ * below show the value of the result bitmap after each group is processed:
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0 1,2
+ * result bitmap is now: 0xff & 0x6 [bucket 12] = 0x6
+ *
+ * 1 1,2 0
+ * result bitmap is now: 0x6 & 0x6 [bucket 0] = 0x6
+ *
+ * 2 0 1,2
+ * result bitmap is now: 0x6 & 0x6 [bucket 10] = 0x6
+ *
+ * 3 0 1,2
+ * result bitmap is now: 0x6 & 0x6 [bucket 8] = 0x6
+ *
+ * 4 0,1,2
+ * result bitmap is now: 0x6 & 0x7 [bucket 0] = 0x6
+ *
+ * 5 0 1 2
+ * result bitmap is now: 0x6 & 0x2 [bucket 1] = 0x2
+ *
+ * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ * result bitmap is now: 0x2 & 0x7 [bucket 0] = 0x2
+ *
+ * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
+ * final result bitmap for this field is: 0x2 & 0x3 [bucket 5] = 0x2
+ *
+ * - at the next field, start with a new, all-zeroes result bitmap. For each
+ * bit set in the previous result bitmap, fill the new result bitmap
+ * (fill_map in pipapo_lookup()) with the rule indices from the
+ * corresponding buckets of the mapping field for this field, done by
+ * pipapo_refill()
+ *
+ * Example: with mapping table from insertion examples, with the current
+ * result bitmap from the previous example, 0x02:
+ *
+ * ::
+ *
+ * rule indices in current field: 0 1 2
+ * map to rules in next field: 0 1 1
+ *
+ * the new result bitmap will be 0x02: rule 1 was set, and rule 1 will be
+ * set.
+ *
+ * We can now extend this example to cover the second iteration of the step
+ * above (lookup and AND bitmap): assuming the port field is
+ * 2048 < 0 0 5 0 >, with starting result bitmap 0x2, and lookup table
+ * for "port" field from pre-computation example:
+ *
+ * ::
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0,1
+ * 1 0,1
+ * 2 0 1
+ * 3 0,1
+ *
+ * operations are: 0x2 & 0x3 [bucket 0] & 0x3 [bucket 0] & 0x2 [bucket 5]
+ * & 0x3 [bucket 0], resulting bitmap is 0x2.
+ *
+ * - if this is the last field in the set, look up the value from the mapping
+ * array corresponding to the final result bitmap
+ *
+ * Example: 0x2 resulting bitmap from 192.168.1.5:2048, mapping array for
+ * last field from insertion example:
+ *
+ * ::
+ *
+ * rule indices in last field: 0 1
+ * map to elements: 0x66 0x42
+ *
+ * the matching element is at 0x42.
+ *
+ *
+ * References
+ * ----------
+ *
+ * [Ligatti 2010]
+ * A Packet-classification Algorithm for Arbitrary Bitmask Rules, with
+ * Automatic Time-space Tradeoffs
+ * Jay Ligatti, Josh Kuhn, and Chris Gage.
+ * Proceedings of the IEEE International Conference on Computer
+ * Communication Networks (ICCCN), August 2010.
+ * http://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf
+ *
+ * [Rottenstreich 2010]
+ * Worst-Case TCAM Rule Expansion
+ * Ori Rottenstreich and Isaac Keslassy.
+ * 2010 Proceedings IEEE INFOCOM, San Diego, CA, 2010.
+ * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.212.4592&rep=rep1&type=pdf
+ *
+ * [Kogan 2014]
+ * SAX-PAC (Scalable And eXpressive PAcket Classification)
+ * Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane,
+ * and Patrick Eugster.
+ * Proceedings of the 2014 ACM conference on SIGCOMM, August 2014.
+ * http://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf
+ */
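The matching steps described in the DOC comment above can be reproduced in a few lines of stand-alone user-space C. The sketch below is illustrative only (it is not part of this patch and uses made-up values): it builds a one-field lookup table with two 4-bit groups and sixteen buckets per group, inserts a single rule for the exact value 0x15, then ANDs the buckets selected by the two nibbles of a packet byte, mirroring the per-group AND sequence shown above.

#include <stdio.h>
#include <stdint.h>

#define GROUPS  2
#define BUCKETS 16

int main(void)
{
        uint8_t lt[GROUPS][BUCKETS] = { 0 }; /* per-bucket rule bitmaps */
        uint8_t key = 0x15, res = 0xff;      /* packet byte, all-ones start */
        int g;

        /* Insert rule #0 for the exact value 0x15: group 0 selects bucket 1,
         * group 1 selects bucket 5.
         */
        lt[0][0x1] |= 1 << 0;
        lt[1][0x5] |= 1 << 0;

        /* Match: AND the bucket selected by each nibble of the key */
        for (g = 0; g < GROUPS; g++) {
                uint8_t v = (g % 2) ? (key & 0x0f) : (key >> 4);

                res &= lt[g][v];
        }

        printf("result bitmap: 0x%02x\n", (unsigned int)res);
        return 0;
}

Compiled and run, it prints "result bitmap: 0x01", i.e. rule #0 matched.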
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <uapi/linux/netfilter/nf_tables.h>
+#include <net/ipv6.h> /* For the maximum length of a field */
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+/* Count of concatenated fields depends on count of 32-bit nftables registers */
+#define NFT_PIPAPO_MAX_FIELDS NFT_REG32_COUNT
+
+/* Largest supported field size */
+#define NFT_PIPAPO_MAX_BYTES (sizeof(struct in6_addr))
+#define NFT_PIPAPO_MAX_BITS (NFT_PIPAPO_MAX_BYTES * BITS_PER_BYTE)
+
+/* Number of bits to be grouped together in lookup table buckets, arbitrary */
+#define NFT_PIPAPO_GROUP_BITS 4
+#define NFT_PIPAPO_GROUPS_PER_BYTE (BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS)
+
+/* Fields are padded to 32 bits in input registers */
+#define NFT_PIPAPO_GROUPS_PADDED_SIZE(x) \
+ (round_up((x) / NFT_PIPAPO_GROUPS_PER_BYTE, sizeof(u32)))
+#define NFT_PIPAPO_GROUPS_PADDING(x) \
+ (NFT_PIPAPO_GROUPS_PADDED_SIZE((x)) - (x) / NFT_PIPAPO_GROUPS_PER_BYTE)
+
+/* Number of buckets, given by 2 ^ n, with n grouped bits */
+#define NFT_PIPAPO_BUCKETS (1 << NFT_PIPAPO_GROUP_BITS)
+
+/* Each n-bit range maps to up to n * 2 rules */
+#define NFT_PIPAPO_MAP_NBITS (const_ilog2(NFT_PIPAPO_MAX_BITS * 2))
+
+/* Use the rest of mapping table buckets for rule indices, but it makes no sense
+ * to exceed 32 bits
+ */
+#if BITS_PER_LONG == 64
+#define NFT_PIPAPO_MAP_TOBITS 32
+#else
+#define NFT_PIPAPO_MAP_TOBITS (BITS_PER_LONG - NFT_PIPAPO_MAP_NBITS)
+#endif
+
+/* ...which gives us the highest allowed index for a rule */
+#define NFT_PIPAPO_RULE0_MAX ((1UL << (NFT_PIPAPO_MAP_TOBITS - 1)) \
+ - (1UL << NFT_PIPAPO_MAP_NBITS))
+
+#define nft_pipapo_for_each_field(field, index, match) \
+ for ((field) = (match)->f, (index) = 0; \
+ (index) < (match)->field_count; \
+ (index)++, (field)++)
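A quick way to sanity-check the group and padding arithmetic above is the following user-space snippet (illustrative only; the macros are local stand-ins for the kernel helpers), applied to a 16-bit port field and a 128-bit IPv6 address field:

#include <stdio.h>

#define GROUP_BITS      4
#define GROUPS_PER_BYTE (8 / GROUP_BITS)
#define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))
#define PADDED_SIZE(g)  ROUND_UP((g) / GROUPS_PER_BYTE, 4)
#define PADDING(g)      (PADDED_SIZE(g) - (g) / GROUPS_PER_BYTE)

int main(void)
{
        /* 16-bit port: 4 groups -> 2 bytes, padded to one 32-bit register */
        printf("port: padded=%d padding=%d\n", PADDED_SIZE(4), PADDING(4));
        /* 128-bit IPv6 address: 32 groups -> 16 bytes, already aligned */
        printf("ipv6: padded=%d padding=%d\n", PADDED_SIZE(32), PADDING(32));
        return 0;
}

A port uses 4 groups, i.e. 2 bytes, padded to a full 4-byte register (2 bytes of padding); an IPv6 address uses 32 groups, i.e. 16 bytes, which is already register-aligned.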
+
+/**
+ * union nft_pipapo_map_bucket - Bucket of mapping table
+ * @to: First rule number (in next field) this rule maps to
+ * @n: Number of rules (in next field) this rule maps to
+ * @e: If there's no next field, pointer to element this rule maps to
+ */
+union nft_pipapo_map_bucket {
+ struct {
+#if BITS_PER_LONG == 64
+ static_assert(NFT_PIPAPO_MAP_TOBITS <= 32);
+ u32 to;
+
+ static_assert(NFT_PIPAPO_MAP_NBITS <= 32);
+ u32 n;
+#else
+ unsigned long to:NFT_PIPAPO_MAP_TOBITS;
+ unsigned long n:NFT_PIPAPO_MAP_NBITS;
+#endif
+ };
+ struct nft_pipapo_elem *e;
+};
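The (to, n) pairs stored in this union chain rules across fields, as described in the DOC comment. A minimal user-space sketch (illustrative only, with a simplified bucket type) walks the chain from the insertion example, where field-0 rule #0 maps to field-1 rule #0 and field-0 rules #1 and #2 both map to field-1 rule #1, which in turn maps to its element pointer:

#include <stdio.h>

struct map_bucket { unsigned int to, n; };  /* simplified (to, n) pair */

int main(void)
{
        /* Field 0 rules 0..2: rule 0 -> field-1 rule 0, rules 1, 2 -> rule 1 */
        struct map_bucket f0_mt[] = { {0, 1}, {1, 1}, {1, 1} };
        const char *f1_elem[] = { "0x66", "0x42" }; /* last field: elements */
        int r;

        for (r = 0; r < 3; r++) {
                unsigned int j;

                for (j = 0; j < f0_mt[r].n; j++)
                        printf("field 0 rule %d -> field 1 rule %u -> element %s\n",
                               r, f0_mt[r].to + j, f1_elem[f0_mt[r].to + j]);
        }
        return 0;
}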
+
+/**
+ * struct nft_pipapo_field - Lookup, mapping tables and related data for a field
+ * @groups: Amount of 4-bit groups
+ * @rules: Number of inserted rules
+ * @bsize: Size of each bucket in lookup table, in longs
+ * @lt: Lookup table: 'groups' rows of NFT_PIPAPO_BUCKETS buckets
+ * @mt: Mapping table: one bucket per rule
+ */
+struct nft_pipapo_field {
+ int groups;
+ unsigned long rules;
+ size_t bsize;
+ unsigned long *lt;
+ union nft_pipapo_map_bucket *mt;
+};
+
+/**
+ * struct nft_pipapo_match - Data used for lookup and matching
+ * @field_count: Amount of fields in set
+ * @scratch: Preallocated per-CPU maps for partial matching results
+ * @bsize_max: Maximum lookup table bucket size of all fields, in longs
+ * @rcu: Matching data is swapped on commits
+ * @f: Fields, with lookup and mapping tables
+ */
+struct nft_pipapo_match {
+ int field_count;
+ unsigned long * __percpu *scratch;
+ size_t bsize_max;
+ struct rcu_head rcu;
+ struct nft_pipapo_field f[0];
+};
+
+/* Current working bitmap index, toggled between field matches */
+static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
+
+/**
+ * struct nft_pipapo - Representation of a set
+ * @match: Currently in-use matching data
+ * @clone: Copy where pending insertions and deletions are kept
+ * @groups: Total amount of 4-bit groups for fields in this set
+ * @width: Total bytes to be matched for one packet, including padding
+ * @dirty: Working copy has pending insertions or deletions
+ * @last_gc: Timestamp of last garbage collection run, jiffies
+ */
+struct nft_pipapo {
+ struct nft_pipapo_match __rcu *match;
+ struct nft_pipapo_match *clone;
+ int groups;
+ int width;
+ bool dirty;
+ unsigned long last_gc;
+};
+
+struct nft_pipapo_elem;
+
+/**
+ * struct nft_pipapo_elem - API-facing representation of single set element
+ * @ext: nftables API extensions
+ */
+struct nft_pipapo_elem {
+ struct nft_set_ext ext;
+};
+
+/**
+ * pipapo_refill() - For each set bit, set bits from selected mapping table item
+ * @map: Bitmap to be scanned for set bits
+ * @len: Length of bitmap in longs
+ * @rules: Number of rules in field
+ * @dst: Destination bitmap
+ * @mt: Mapping table containing bit set specifiers
+ * @match_only: Find a single bit and return, don't fill
+ *
+ * Iteration over set bits with __builtin_ctzl(): Daniel Lemire, public domain.
+ *
+ * For each bit set in map, select the bucket from mapping table with index
+ * corresponding to the position of the bit set. Use start bit and amount of
+ * bits specified in bucket to fill region in dst.
+ *
+ * Return: -1 on no match, bit position on 'match_only', 0 otherwise.
+ */
+static int pipapo_refill(unsigned long *map, int len, int rules,
+ unsigned long *dst, union nft_pipapo_map_bucket *mt,
+ bool match_only)
+{
+ unsigned long bitset;
+ int k, ret = -1;
+
+ for (k = 0; k < len; k++) {
+ bitset = map[k];
+ while (bitset) {
+ unsigned long t = bitset & -bitset;
+ int r = __builtin_ctzl(bitset);
+ int i = k * BITS_PER_LONG + r;
+
+ if (unlikely(i >= rules)) {
+ map[k] = 0;
+ return -1;
+ }
+
+ if (match_only) {
+ bitmap_clear(map, i, 1);
+ return i;
+ }
+
+ ret = 0;
+
+ bitmap_set(dst, mt[i].to, mt[i].n);
+
+ bitset ^= t;
+ }
+ map[k] = 0;
+ }
+
+ return ret;
+}
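The set-bit iteration credited to Daniel Lemire above boils down to isolating and clearing the lowest set bit and locating it with __builtin_ctzl(). A stand-alone user-space sketch of just that pattern (illustrative only, relying on the same GCC/Clang builtin as the kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long bitset = 0x29;                /* bits 0, 3 and 5 set */

        while (bitset) {
                unsigned long t = bitset & -bitset; /* isolate lowest set bit */
                int i = __builtin_ctzl(bitset);     /* its index */

                printf("bit %d is set\n", i);
                bitset ^= t;                        /* clear it and continue */
        }
        return 0;
}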
+
+/**
+ * nft_pipapo_lookup() - Lookup function
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @key: Key data to be matched against existing elements
+ * @ext: nftables API extension pointer, filled with matching reference
+ *
+ * For more details, see DOC: Theory of Operation.
+ *
+ * Return: true on match, false otherwise.
+ */
+static bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ unsigned long *res_map, *fill_map;
+ u8 genmask = nft_genmask_cur(net);
+ const u8 *rp = (const u8 *)key;
+ struct nft_pipapo_match *m;
+ struct nft_pipapo_field *f;
+ bool map_index;
+ int i;
+
+ local_bh_disable();
+
+ map_index = raw_cpu_read(nft_pipapo_scratch_index);
+
+ m = rcu_dereference(priv->match);
+
+ if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
+ goto out;
+
+ res_map = *raw_cpu_ptr(m->scratch) + (map_index ? m->bsize_max : 0);
+ fill_map = *raw_cpu_ptr(m->scratch) + (map_index ? 0 : m->bsize_max);
+
+ memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
+
+ nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
+ unsigned long *lt = f->lt;
+ int b, group;
+
+ /* For each 4-bit group: select lookup table bucket depending on
+ * packet bytes value, then AND bucket value
+ */
+ for (group = 0; group < f->groups; group += 2) {
+ u8 v;
+
+ v = *rp >> 4;
+ __bitmap_and(res_map, res_map, lt + v * f->bsize,
+ f->bsize * BITS_PER_LONG);
+ lt += f->bsize * NFT_PIPAPO_BUCKETS;
+
+ v = *rp & 0x0f;
+ rp++;
+ __bitmap_and(res_map, res_map, lt + v * f->bsize,
+ f->bsize * BITS_PER_LONG);
+ lt += f->bsize * NFT_PIPAPO_BUCKETS;
+ }
+
+ /* Now populate the bitmap for the next field, unless this is
+ * the last field, in which case return the matched 'ext'
+ * pointer if any.
+ *
+ * Now res_map contains the matching bitmap, and fill_map is the
+ * bitmap for the next field.
+ */
+next_match:
+ b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+ last);
+ if (b < 0) {
+ raw_cpu_write(nft_pipapo_scratch_index, map_index);
+ local_bh_enable();
+
+ return false;
+ }
+
+ if (last) {
+ *ext = &f->mt[b].e->ext;
+ if (unlikely(nft_set_elem_expired(*ext) ||
+ !nft_set_elem_active(*ext, genmask)))
+ goto next_match;
+
+ /* Last field: we're just returning the key without
+ * filling the initial bitmap for the next field, so the
+ * current inactive bitmap is clean and can be reused as
+ * *next* bitmap (not initial) for the next packet.
+ */
+ raw_cpu_write(nft_pipapo_scratch_index, map_index);
+ local_bh_enable();
+
+ return true;
+ }
+
+ /* Swap bitmap indices: res_map is the initial bitmap for the
+ * next field, and fill_map is guaranteed to be all-zeroes at
+ * this point.
+ */
+ map_index = !map_index;
+ swap(res_map, fill_map);
+
+ rp += NFT_PIPAPO_GROUPS_PADDING(f->groups);
+ }
+
+out:
+ local_bh_enable();
+ return false;
+}
+
+/**
+ * pipapo_get() - Get matching element reference given key data
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @data: Key data to be matched against existing elements
+ * @genmask: If set, check that element is active in given genmask
+ *
+ * This is essentially the same as the lookup function, except that it matches
+ * key data against the uncommitted copy and doesn't use preallocated maps for
+ * bitmap results.
+ *
+ * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
+ */
+static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ const struct nft_set *set,
+ const u8 *data, u8 genmask)
+{
+ struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m = priv->clone;
+ unsigned long *res_map, *fill_map = NULL;
+ struct nft_pipapo_field *f;
+ int i;
+
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ if (!res_map) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ fill_map = kcalloc(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ if (!fill_map) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
+
+ nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
+ unsigned long *lt = f->lt;
+ int b, group;
+
+ /* For each 4-bit group: select lookup table bucket depending on
+ * packet bytes value, then AND bucket value
+ */
+ for (group = 0; group < f->groups; group++) {
+ u8 v;
+
+ if (group % 2) {
+ v = *data & 0x0f;
+ data++;
+ } else {
+ v = *data >> 4;
+ }
+ __bitmap_and(res_map, res_map, lt + v * f->bsize,
+ f->bsize * BITS_PER_LONG);
+
+ lt += f->bsize * NFT_PIPAPO_BUCKETS;
+ }
+
+ /* Now populate the bitmap for the next field, unless this is
+ * the last field, in which case return the matched 'ext'
+ * pointer if any.
+ *
+ * Now res_map contains the matching bitmap, and fill_map is the
+ * bitmap for the next field.
+ */
+next_match:
+ b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+ last);
+ if (b < 0)
+ goto out;
+
+ if (last) {
+ if (nft_set_elem_expired(&f->mt[b].e->ext) ||
+ (genmask &&
+ !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+ goto next_match;
+
+ ret = f->mt[b].e;
+ goto out;
+ }
+
+ data += NFT_PIPAPO_GROUPS_PADDING(f->groups);
+
+ /* Swap bitmap indices: fill_map will be the initial bitmap for
+ * the next field (i.e. the new res_map), and res_map is
+ * guaranteed to be all-zeroes at this point, ready to be filled
+ * according to the next mapping table.
+ */
+ swap(res_map, fill_map);
+ }
+
+out:
+ kfree(fill_map);
+ kfree(res_map);
+ return ret;
+}
+
+/**
+ * nft_pipapo_get() - Get matching element reference given key data
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ * @flags: Unused
+ */
+static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem, unsigned int flags)
+{
+ return pipapo_get(net, set, (const u8 *)elem->key.val.data,
+ nft_genmask_cur(net));
+}
+
+/**
+ * pipapo_resize() - Resize lookup or mapping table, or both
+ * @f: Field containing lookup and mapping tables
+ * @old_rules: Previous amount of rules in field
+ * @rules: New amount of rules
+ *
+ * Increase, decrease or maintain tables size depending on new amount of rules,
+ * and copy data over. In case the new size is smaller, throw away data for
+ * highest-numbered rules.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
+{
+ long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
+ union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
+ size_t new_bucket_size, copy;
+ int group, bucket;
+
+ new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
+
+ if (new_bucket_size == f->bsize)
+ goto mt;
+
+ if (new_bucket_size > f->bsize)
+ copy = f->bsize;
+ else
+ copy = new_bucket_size;
+
+ new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS * new_bucket_size *
+ sizeof(*new_lt), GFP_KERNEL);
+ if (!new_lt)
+ return -ENOMEM;
+
+ new_p = new_lt;
+ old_p = old_lt;
+ for (group = 0; group < f->groups; group++) {
+ for (bucket = 0; bucket < NFT_PIPAPO_BUCKETS; bucket++) {
+ memcpy(new_p, old_p, copy * sizeof(*new_p));
+ new_p += copy;
+ old_p += copy;
+
+ if (new_bucket_size > f->bsize)
+ new_p += new_bucket_size - f->bsize;
+ else
+ old_p += f->bsize - new_bucket_size;
+ }
+ }
+
+mt:
+ new_mt = kvmalloc(rules * sizeof(*new_mt), GFP_KERNEL);
+ if (!new_mt) {
+ kvfree(new_lt);
+ return -ENOMEM;
+ }
+
+ memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
+ if (rules > old_rules) {
+ memset(new_mt + old_rules, 0,
+ (rules - old_rules) * sizeof(*new_mt));
+ }
+
+ if (new_lt) {
+ f->bsize = new_bucket_size;
+ f->lt = new_lt;
+ kvfree(old_lt);
+ }
+
+ f->mt = new_mt;
+ kvfree(old_mt);
+
+ return 0;
+}
+
+/**
+ * pipapo_bucket_set() - Set rule bit in bucket given group and group value
+ * @f: Field containing lookup table
+ * @rule: Rule index
+ * @group: Group index
+ * @v: Value of bit group
+ */
+static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group,
+ int v)
+{
+ unsigned long *pos;
+
+ pos = f->lt + f->bsize * NFT_PIPAPO_BUCKETS * group;
+ pos += f->bsize * v;
+
+ __set_bit(rule, pos);
+}
+
+/**
+ * pipapo_insert() - Insert new rule in field given input key and mask length
+ * @f: Field containing lookup table
+ * @k: Input key for classification, without nftables padding
+ * @mask_bits: Length of mask; matches field length for non-ranged entry
+ *
+ * Insert a new rule reference in lookup buckets corresponding to k and
+ * mask_bits.
+ *
+ * Return: 1 on success (one rule inserted), negative error code on failure.
+ */
+static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
+ int mask_bits)
+{
+ int rule = f->rules++, group, ret;
+
+ ret = pipapo_resize(f, f->rules - 1, f->rules);
+ if (ret)
+ return ret;
+
+ for (group = 0; group < f->groups; group++) {
+ int i, v;
+ u8 mask;
+
+ if (group % 2)
+ v = k[group / 2] & 0x0f;
+ else
+ v = k[group / 2] >> 4;
+
+ if (mask_bits >= (group + 1) * 4) {
+ /* Not masked */
+ pipapo_bucket_set(f, rule, group, v);
+ } else if (mask_bits <= group * 4) {
+ /* Completely masked */
+ for (i = 0; i < NFT_PIPAPO_BUCKETS; i++)
+ pipapo_bucket_set(f, rule, group, i);
+ } else {
+ /* The mask limit falls on this group */
+ mask = 0x0f >> (mask_bits - group * 4);
+ for (i = 0; i < NFT_PIPAPO_BUCKETS; i++) {
+ if ((i & ~mask) == (v & ~mask))
+ pipapo_bucket_set(f, rule, group, i);
+ }
+ }
+ }
+
+ return 1;
+}
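The three branches in the loop above decide which buckets of a 4-bit group receive the new rule bit, depending on where the mask boundary falls. The following user-space sketch (illustrative only; group numbering and nibble layout mirror pipapo_insert()) prints those buckets for one group:

#include <stdio.h>

static void buckets_for_group(int group, int v, int mask_bits)
{
        unsigned int mask;
        int i;

        if (mask_bits >= (group + 1) * 4) {     /* group fully below mask */
                printf("%d\n", v);
                return;
        }

        if (mask_bits <= group * 4) {           /* group fully wildcarded */
                for (i = 0; i < 16; i++)
                        printf("%d ", i);
                printf("\n");
                return;
        }

        mask = 0x0f >> (mask_bits - group * 4); /* boundary falls in group */
        for (i = 0; i < 16; i++)
                if ((i & ~mask) == (v & ~mask))
                        printf("%d ", i);
        printf("\n");
}

int main(void)
{
        buckets_for_group(5, 0x9, 21); /* a /21 prefix: prints buckets 8..15 */
        return 0;
}

For a /21 IPv4 prefix, group 5 covers bits 20-23: only the top bit of the nibble is fixed, so the rule bit lands in buckets 8 through 15.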
+
+/**
+ * pipapo_step_diff() - Check if setting @step bit in netmask would change it
+ * @base: Mask we are expanding
+ * @step: Step bit for given expansion step
+ * @len: Total length of mask space (set and unset bits), bytes
+ *
+ * Convenience function for mask expansion.
+ *
+ * Return: true if step bit changes mask (i.e. isn't set), false otherwise.
+ */
+static bool pipapo_step_diff(u8 *base, int step, int len)
+{
+ /* Network order, byte-addressed */
+#ifdef __BIG_ENDIAN__
+ return !(BIT(step % BITS_PER_BYTE) & base[step / BITS_PER_BYTE]);
+#else
+ return !(BIT(step % BITS_PER_BYTE) &
+ base[len - 1 - step / BITS_PER_BYTE]);
+#endif
+}
+
+/**
+ * pipapo_step_after_end() - Check if mask exceeds range end with given step
+ * @base: Mask we are expanding
+ * @end: End of range
+ * @step: Step bit for given expansion step, highest bit to be set
+ * @len: Total length of mask space (set and unset bits), bytes
+ *
+ * Convenience function for mask expansion.
+ *
+ * Return: true if mask exceeds range setting step bits, false otherwise.
+ */
+static bool pipapo_step_after_end(const u8 *base, const u8 *end, int step,
+ int len)
+{
+ u8 tmp[NFT_PIPAPO_MAX_BYTES];
+ int i;
+
+ memcpy(tmp, base, len);
+
+ /* Network order, byte-addressed */
+ for (i = 0; i <= step; i++)
+#ifdef __BIG_ENDIAN__
+ tmp[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
+#else
+ tmp[len - 1 - i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
+#endif
+
+ return memcmp(tmp, end, len) > 0;
+}
+
+/**
+ * pipapo_base_sum() - Sum step bit to given len-sized netmask base with carry
+ * @base: Netmask base
+ * @step: Step bit to sum
+ * @len: Netmask length, bytes
+ */
+static void pipapo_base_sum(u8 *base, int step, int len)
+{
+ bool carry = false;
+ int i;
+
+ /* Network order, byte-addressed */
+#ifdef __BIG_ENDIAN__
+ for (i = step / BITS_PER_BYTE; i < len; i++) {
+#else
+ for (i = len - 1 - step / BITS_PER_BYTE; i >= 0; i--) {
+#endif
+ if (carry)
+ base[i]++;
+ else
+ base[i] += 1 << (step % BITS_PER_BYTE);
+
+ if (base[i])
+ break;
+
+ carry = true;
+ }
+}
+
+/**
+ * pipapo_expand() - Expand to composing netmasks, insert into lookup table
+ * @f: Field containing lookup table
+ * @start: Start of range
+ * @end: End of range
+ * @len: Length of value in bits
+ *
+ * Expand range to composing netmasks and insert corresponding rule references
+ * in lookup buckets.
+ *
+ * Return: number of inserted rules on success, negative error code on failure.
+ */
+static int pipapo_expand(struct nft_pipapo_field *f,
+ const u8 *start, const u8 *end, int len)
+{
+ int step, masks = 0, bytes = DIV_ROUND_UP(len, BITS_PER_BYTE);
+ u8 base[NFT_PIPAPO_MAX_BYTES];
+
+ memcpy(base, start, bytes);
+ while (memcmp(base, end, bytes) <= 0) {
+ int err;
+
+ step = 0;
+ while (pipapo_step_diff(base, step, bytes)) {
+ if (pipapo_step_after_end(base, end, step, bytes))
+ break;
+
+ step++;
+ if (step >= len) {
+ if (!masks) {
+ pipapo_insert(f, base, 0);
+ masks = 1;
+ }
+ goto out;
+ }
+ }
+
+ err = pipapo_insert(f, base, len - step);
+
+ if (err < 0)
+ return err;
+
+ masks++;
+ pipapo_base_sum(base, step, bytes);
+ }
+out:
+ return masks;
+}
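Seen from user space, the expansion performed here is the classic range-to-prefix decomposition. A small illustrative sketch of the same idea (not part of this patch, small bit widths only):

#include <stdio.h>
#include <stdint.h>

/* Expand the inclusive range [start, end] over 'bits' bits into covering
 * prefixes, printed as value/prefix-length.
 */
static void expand_range(uint32_t start, uint32_t end, int bits)
{
        uint32_t base = start;

        while (base <= end) {
                int step = 0;

                /* Widen the block while it stays aligned and within range */
                while (!(base & (1u << step)) &&
                       base + (1u << (step + 1)) - 1 <= end) {
                        step++;
                        if (step >= bits)
                                break;
                }

                printf("%u/%d\n", base, bits - step);
                if (step >= bits || base + (1u << step) - 1 >= end)
                        break;
                base += 1u << step;
        }
}

int main(void)
{
        expand_range(3, 10, 4); /* prints 3/4, 4/2, 8/3, 10/4 */
        return 0;
}

For the 4-bit range 3-10 it prints four covering masks, within the n * 2 bound mentioned at the top of this file.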
+
+/**
+ * pipapo_map() - Insert rules in mapping tables, mapping them between fields
+ * @m: Matching data, including mapping table
+ * @map: Table of rule maps: array of first rule and amount of rules
+ * in next field a given rule maps to, for each field
+ * @e: For last field, pointer to &struct nft_pipapo_elem these rules map to
+ */
+static void pipapo_map(struct nft_pipapo_match *m,
+ union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS],
+ struct nft_pipapo_elem *e)
+{
+ struct nft_pipapo_field *f;
+ int i, j;
+
+ for (i = 0, f = m->f; i < m->field_count - 1; i++, f++) {
+ for (j = 0; j < map[i].n; j++) {
+ f->mt[map[i].to + j].to = map[i + 1].to;
+ f->mt[map[i].to + j].n = map[i + 1].n;
+ }
+ }
+
+ /* Last field: map to ext instead of mapping to next field */
+ for (j = 0; j < map[i].n; j++)
+ f->mt[map[i].to + j].e = e;
+}
+
+/**
+ * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
+ * @clone: Copy of matching data with pending insertions and deletions
+ * @bsize_max: Maximum bucket size, scratch maps cover two buckets
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ unsigned long bsize_max)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ unsigned long *scratch;
+
+ scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2,
+ GFP_KERNEL, cpu_to_node(i));
+ if (!scratch) {
+ /* On failure, there's no need to undo previous
+ * allocations: this means that some scratch maps have
+ * a bigger allocated size now (this is only called on
+ * insertion), but the extra space won't be used by any
+ * CPU as new elements are not inserted and m->bsize_max
+ * is not updated.
+ */
+ return -ENOMEM;
+ }
+
+ kfree(*per_cpu_ptr(clone->scratch, i));
+
+ *per_cpu_ptr(clone->scratch, i) = scratch;
+ }
+
+ return 0;
+}
+
+/**
+ * nft_pipapo_insert() - Validate and insert ranged elements
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ * @ext2: Filled with pointer to &struct nft_set_ext in inserted element
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext2)
+{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ const u8 *start = (const u8 *)elem->key.val.data, *end;
+ struct nft_pipapo_elem *e = elem->priv, *dup;
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m = priv->clone;
+ u8 genmask = nft_genmask_next(net);
+ struct nft_pipapo_field *f;
+ int i, bsize_max, err = 0;
+
+ dup = pipapo_get(net, set, start, genmask);
+ if (PTR_ERR(dup) == -ENOENT) {
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) {
+ end = (const u8 *)nft_set_ext_key_end(ext)->data;
+ dup = pipapo_get(net, set, end, nft_genmask_next(net));
+ } else {
+ end = start;
+ }
+ }
+
+ if (PTR_ERR(dup) != -ENOENT) {
+ if (IS_ERR(dup))
+ return PTR_ERR(dup);
+ *ext2 = &dup->ext;
+ return -EEXIST;
+ }
+
+ /* Validate */
+ nft_pipapo_for_each_field(f, i, m) {
+ const u8 *start_p = start, *end_p = end;
+
+ if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
+ return -ENOSPC;
+
+ if (memcmp(start_p, end_p,
+ f->groups / NFT_PIPAPO_GROUPS_PER_BYTE) > 0)
+ return -EINVAL;
+
+ start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ }
+
+ /* Insert */
+ priv->dirty = true;
+
+ bsize_max = m->bsize_max;
+
+ nft_pipapo_for_each_field(f, i, m) {
+ int ret;
+
+ rulemap[i].to = f->rules;
+
+ ret = memcmp(start, end,
+ f->groups / NFT_PIPAPO_GROUPS_PER_BYTE);
+ if (!ret) {
+ ret = pipapo_insert(f, start,
+ f->groups * NFT_PIPAPO_GROUP_BITS);
+ } else {
+ ret = pipapo_expand(f, start, end,
+ f->groups * NFT_PIPAPO_GROUP_BITS);
+ }
+
+ if (f->bsize > bsize_max)
+ bsize_max = f->bsize;
+
+ rulemap[i].n = ret;
+
+ start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ }
+
+ if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
+ err = pipapo_realloc_scratch(m, bsize_max);
+ if (err)
+ return err;
+
+ this_cpu_write(nft_pipapo_scratch_index, false);
+
+ m->bsize_max = bsize_max;
+ }
+
+ *ext2 = &e->ext;
+
+ pipapo_map(m, rulemap, e);
+
+ return 0;
+}
+
+/**
+ * pipapo_clone() - Clone matching data to create new working copy
+ * @old: Existing matching data
+ *
+ * Return: copy of matching data passed as 'old', error pointer on failure
+ */
+static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+{
+ struct nft_pipapo_field *dst, *src;
+ struct nft_pipapo_match *new;
+ int i;
+
+ new = kmalloc(sizeof(*new) + sizeof(*dst) * old->field_count,
+ GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ new->field_count = old->field_count;
+ new->bsize_max = old->bsize_max;
+
+ new->scratch = alloc_percpu(*new->scratch);
+ if (!new->scratch)
+ goto out_scratch;
+
+ rcu_head_init(&new->rcu);
+
+ src = old->f;
+ dst = new->f;
+
+ for (i = 0; i < old->field_count; i++) {
+ memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
+
+ dst->lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS *
+ src->bsize * sizeof(*dst->lt),
+ GFP_KERNEL);
+ if (!dst->lt)
+ goto out_lt;
+
+ memcpy(dst->lt, src->lt,
+ src->bsize * sizeof(*dst->lt) *
+ src->groups * NFT_PIPAPO_BUCKETS);
+
+ dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
+ if (!dst->mt)
+ goto out_mt;
+
+ memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
+ src++;
+ dst++;
+ }
+
+ return new;
+
+out_mt:
+ kvfree(dst->lt);
+out_lt:
+ for (dst--; i > 0; i--) {
+ kvfree(dst->mt);
+ kvfree(dst->lt);
+ dst--;
+ }
+ free_percpu(new->scratch);
+out_scratch:
+ kfree(new);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * pipapo_rules_same_key() - Get number of rules originated from the same entry
+ * @f: Field containing mapping table
+ * @first: Index of first rule in set of rules mapping to same entry
+ *
+ * Using the fact that all rules in a field that originated from the same entry
+ * will map to the same set of rules in the next field, or to the same element
+ * reference, return the cardinality of the set of rules that originated from
+ * the same entry as the rule with index @first, @first rule included.
+ *
+ * In pictures:
+ * rules
+ * field #0 0 1 2 3 4
+ * map to: 0 1 2-4 2-4 5-9
+ * . . ....... . ...
+ * | | | | \ \
+ * | | | | \ \
+ * | | | | \ \
+ * ' ' ' ' ' \
+ * in field #1 0 1 2 3 4 5 ...
+ *
+ * if this is called for rule 2 on field #0, it will return 3, as also rules 2
+ * and 3 in field 0 map to the same set of rules (2, 3, 4) in the next field.
+ *
+ * For the last field in a set, we can rely on associated entries to map to the
+ * same element references.
+ *
+ * Return: Number of rules that originated from the same entry as @first.
+ */
+static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
+{
+ struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
+ int r;
+
+ for (r = first; r < f->rules; r++) {
+ if (r != first && e != f->mt[r].e)
+ return r - first;
+
+ e = f->mt[r].e;
+ }
+
+ if (r != first)
+ return r - first;
+
+ return 0;
+}
+
+/**
+ * pipapo_unmap() - Remove rules from mapping tables, renumber remaining ones
+ * @mt: Mapping array
+ * @rules: Original amount of rules in mapping table
+ * @start: First rule index to be removed
+ * @n: Amount of rules to be removed
+ * @to_offset: First rule index, in next field, this group of rules maps to
+ * @is_last: If this is the last field, delete reference from mapping array
+ *
+ * This is used to unmap rules from the mapping table for a single field,
+ * maintaining consistency and compactness for the existing ones.
+ *
+ * In pictures: let's assume that we want to delete rules 2 and 3 from the
+ * following mapping array:
+ *
+ * rules
+ * 0 1 2 3 4
+ * map to: 4-10 4-10 11-15 11-15 16-18
+ *
+ * the result will be:
+ *
+ * rules
+ * 0 1 2
+ * map to: 4-10 4-10 11-13
+ *
+ * for fields before the last one. In case this is the mapping table for the
+ * last field in a set, and rules map to pointers to &struct nft_pipapo_elem:
+ *
+ * rules
+ * 0 1 2 3 4
+ * element pointers: 0x42 0x42 0x33 0x33 0x44
+ *
+ * the result will be:
+ *
+ * rules
+ * 0 1 2
+ * element pointers: 0x42 0x42 0x44
+ */
+static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
+ int start, int n, int to_offset, bool is_last)
+{
+ int i;
+
+ memmove(mt + start, mt + start + n, (rules - start - n) * sizeof(*mt));
+ memset(mt + rules - n, 0, n * sizeof(*mt));
+
+ if (is_last)
+ return;
+
+ for (i = start; i < rules - n; i++)
+ mt[i].to -= to_offset;
+}
+
+/**
+ * pipapo_drop() - Delete entry from lookup and mapping tables, given rule map
+ * @m: Matching data
+ * @rulemap: Table of rule maps, arrays of first rule and amount of rules
+ * in next field a given entry maps to, for each field
+ *
+ * For each rule in lookup table buckets mapping to this set of rules, drop
+ * all bits set in lookup table mapping. In pictures, assuming we want to drop
+ * rules 0 and 1 from this lookup table:
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0 1,2
+ * 1 1,2 0
+ * 2 0 1,2
+ * 3 0 1,2
+ * 4 0,1,2
+ * 5 0 1 2
+ * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
+ *
+ * rule 2 becomes rule 0, and the result will be:
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0
+ * 1 0
+ * 2 0
+ * 3 0
+ * 4 0
+ * 5 0
+ * 6 0
+ * 7 0 0
+ *
+ * once this is done, call unmap() to drop all the corresponding rule references
+ * from mapping tables.
+ */
+static void pipapo_drop(struct nft_pipapo_match *m,
+ union nft_pipapo_map_bucket rulemap[])
+{
+ struct nft_pipapo_field *f;
+ int i;
+
+ nft_pipapo_for_each_field(f, i, m) {
+ int g;
+
+ for (g = 0; g < f->groups; g++) {
+ unsigned long *pos;
+ int b;
+
+ pos = f->lt + g * NFT_PIPAPO_BUCKETS * f->bsize;
+
+ for (b = 0; b < NFT_PIPAPO_BUCKETS; b++) {
+ bitmap_cut(pos, pos, rulemap[i].to,
+ rulemap[i].n,
+ f->bsize * BITS_PER_LONG);
+
+ pos += f->bsize;
+ }
+ }
+
+ pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n,
+ rulemap[i + 1].n, i == m->field_count - 1);
+ if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) {
+ /* We can ignore this, a failure to shrink tables down
+ * doesn't make tables invalid.
+ */
+ ;
+ }
+ f->rules -= rulemap[i].n;
+ }
+}
+
+/**
+ * pipapo_gc() - Drop expired entries from set, destroy start and end elements
+ * @set: nftables API set representation
+ * @m: Matching data
+ */
+static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ int rules_f0, first_rule = 0;
+
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ struct nft_pipapo_field *f;
+ struct nft_pipapo_elem *e;
+ int i, start, rules_fx;
+
+ start = first_rule;
+ rules_fx = rules_f0;
+
+ nft_pipapo_for_each_field(f, i, m) {
+ rulemap[i].to = start;
+ rulemap[i].n = rules_fx;
+
+ if (i < m->field_count - 1) {
+ rules_fx = f->mt[start].n;
+ start = f->mt[start].to;
+ }
+ }
+
+ /* Pick the last field, and its last index */
+ f--;
+ i--;
+ e = f->mt[rulemap[i].to].e;
+ if (nft_set_elem_expired(&e->ext) &&
+ !nft_set_elem_mark_busy(&e->ext)) {
+ priv->dirty = true;
+ pipapo_drop(m, rulemap);
+
+ rcu_barrier();
+ nft_set_elem_destroy(set, e, true);
+
+ /* And check again current first rule, which is now the
+ * first we haven't checked.
+ */
+ } else {
+ first_rule += rules_f0;
+ }
+ }
+
+ priv->last_gc = jiffies;
+}
+
+/**
+ * pipapo_free_fields() - Free per-field tables contained in matching data
+ * @m: Matching data
+ */
+static void pipapo_free_fields(struct nft_pipapo_match *m)
+{
+ struct nft_pipapo_field *f;
+ int i;
+
+ nft_pipapo_for_each_field(f, i, m) {
+ kvfree(f->lt);
+ kvfree(f->mt);
+ }
+}
+
+/**
+ * pipapo_reclaim_match() - RCU callback to free fields from old matching data
+ * @rcu: RCU head
+ */
+static void pipapo_reclaim_match(struct rcu_head *rcu)
+{
+ struct nft_pipapo_match *m;
+ int i;
+
+ m = container_of(rcu, struct nft_pipapo_match, rcu);
+
+ for_each_possible_cpu(i)
+ kfree(*per_cpu_ptr(m->scratch, i));
+
+ free_percpu(m->scratch);
+
+ pipapo_free_fields(m);
+
+ kfree(m);
+}
+
+/**
+ * pipapo_commit() - Replace lookup data with current working copy
+ * @set: nftables API set representation
+ *
+ * While at it, check if we should perform garbage collection on the working
+ * copy before committing it for lookup, and don't replace the table if the
+ * working copy doesn't have pending changes.
+ *
+ * We also need to create a new working copy for subsequent insertions and
+ * deletions.
+ */
+static void pipapo_commit(const struct nft_set *set)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *new_clone, *old;
+
+ if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
+ pipapo_gc(set, priv->clone);
+
+ if (!priv->dirty)
+ return;
+
+ new_clone = pipapo_clone(priv->clone);
+ if (IS_ERR(new_clone))
+ return;
+
+ priv->dirty = false;
+
+ old = rcu_access_pointer(priv->match);
+ rcu_assign_pointer(priv->match, priv->clone);
+ if (old)
+ call_rcu(&old->rcu, pipapo_reclaim_match);
+
+ priv->clone = new_clone;
+}
+
+/**
+ * nft_pipapo_activate() - Mark element reference as active given key, commit
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ *
+ * On insertion, elements are added to a copy of the matching data currently
+ * in use for lookups, and not directly inserted into current lookup data, so
+ * we'll take care of that by calling pipapo_commit() here. Both
+ * nft_pipapo_insert() and nft_pipapo_activate() are called once for each
+ * element, hence we can't use either one on its own as a real commit operation.
+ */
+static void nft_pipapo_activate(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_pipapo_elem *e;
+
+ e = pipapo_get(net, set, (const u8 *)elem->key.val.data, 0);
+ if (IS_ERR(e))
+ return;
+
+ nft_set_elem_change_active(net, set, &e->ext);
+ nft_set_elem_clear_busy(&e->ext);
+
+ pipapo_commit(set);
+}
+
+/**
+ * pipapo_deactivate() - Check that element is in set, mark as inactive
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @data: Input key data
+ * @ext: nftables API extension pointer, used to check for end element
+ *
+ * This is a convenience function that can be called from both
+ * nft_pipapo_deactivate() and nft_pipapo_flush(), as they are in fact the same
+ * operation.
+ *
+ * Return: deactivated element if found, NULL otherwise.
+ */
+static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ const u8 *data, const struct nft_set_ext *ext)
+{
+ struct nft_pipapo_elem *e;
+
+ e = pipapo_get(net, set, data, nft_genmask_next(net));
+ if (IS_ERR(e))
+ return NULL;
+
+ nft_set_elem_change_active(net, set, &e->ext);
+
+ return e;
+}
+
+/**
+ * nft_pipapo_deactivate() - Call pipapo_deactivate() to make element inactive
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ *
+ * Return: deactivated element if found, NULL otherwise.
+ */
+static void *nft_pipapo_deactivate(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+
+ return pipapo_deactivate(net, set, (const u8 *)elem->key.val.data, ext);
+}
+
+/**
+ * nft_pipapo_flush() - Call pipapo_deactivate() to make element inactive
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ *
+ * This is functionally the same as nft_pipapo_deactivate(), with a slightly
+ * different interface. It's also called once for each element in a set being
+ * flushed, so we can't implement a flush operation in the strict sense, which
+ * would otherwise be as simple as allocating an empty copy of the matching
+ * data.
+ *
+ * Note that we could in theory do that, mark the set as flushed, and ignore
+ * subsequent calls, but we would leak all the elements after the first one,
+ * because they wouldn't then be freed as result of API calls.
+ *
+ * Return: true if element was found and deactivated.
+ */
+static bool nft_pipapo_flush(const struct net *net, const struct nft_set *set,
+ void *elem)
+{
+ struct nft_pipapo_elem *e = elem;
+
+ return pipapo_deactivate(net, set, (const u8 *)nft_set_ext_key(&e->ext),
+ &e->ext);
+}
+
+/**
+ * pipapo_get_boundaries() - Get byte interval for associated rules
+ * @f: Field including lookup table
+ * @first_rule: First rule (lowest index)
+ * @rule_count: Number of associated rules
+ * @left: Byte expression for left boundary (start of range)
+ * @right: Byte expression for right boundary (end of range)
+ *
+ * Given the first rule and the number of rules that originated from the same
+ * entry, build the original range associated with that entry, and calculate
+ * the length of the originating netmask.
+ *
+ * In pictures:
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 1,2
+ * 1 1,2
+ * 2 1,2
+ * 3 1,2
+ * 4 1,2
+ * 5 1 2
+ * 6 1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ * 7 1,2 1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ *
+ * this is the lookup table corresponding to the IPv4 range
+ * 192.168.1.0-192.168.2.1, which was expanded to the two composing netmasks,
+ * rule #1: 192.168.1.0/24, and rule #2: 192.168.2.0/31.
+ *
+ * This function fills @left and @right with the byte values of the leftmost
+ * and rightmost bucket indices for the lowest and highest rule indices,
+ * respectively. If @first_rule is 1 and @rule_count is 2, we obtain, in
+ * nibbles:
+ * left: < 12, 0, 10, 8, 0, 1, 0, 0 >
+ * right: < 12, 0, 10, 8, 0, 2, 2, 1 >
+ * corresponding to bytes:
+ * left: < 192, 168, 1, 0 >
+ * right: < 192, 168, 2, 1 >
+ * with mask length irrelevant here, unused on return, as the range is already
+ * defined by its start and end points. The mask length is relevant for a single
+ * ranged entry instead: if @first_rule is 1 and @rule_count is 1, we ignore
+ * rule 2 above: @left becomes < 192, 168, 1, 0 >, @right becomes
+ * < 192, 168, 1, 255 >, and the mask length, calculated from the distances
+ * between leftmost and rightmost bucket indices for each group, would be 24.
+ *
+ * Return: mask length, in bits.
+ */
+static int pipapo_get_boundaries(struct nft_pipapo_field *f, int first_rule,
+ int rule_count, u8 *left, u8 *right)
+{
+ u8 *l = left, *r = right;
+ int g, mask_len = 0;
+
+ for (g = 0; g < f->groups; g++) {
+ int b, x0, x1;
+
+ x0 = -1;
+ x1 = -1;
+ for (b = 0; b < NFT_PIPAPO_BUCKETS; b++) {
+ unsigned long *pos;
+
+ pos = f->lt + (g * NFT_PIPAPO_BUCKETS + b) * f->bsize;
+ if (test_bit(first_rule, pos) && x0 == -1)
+ x0 = b;
+ if (test_bit(first_rule + rule_count - 1, pos))
+ x1 = b;
+ }
+
+ if (g % 2) {
+ *(l++) |= x0 & 0x0f;
+ *(r++) |= x1 & 0x0f;
+ } else {
+ *l |= x0 << 4;
+ *r |= x1 << 4;
+ }
+
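+		/* A distance of 0, 1, 3 or 7 between the leftmost and rightmost
+		 * matching buckets of this 4-bit group corresponds to 4, 3, 2
+		 * or 1 bits of the originating netmask, respectively.
+		 */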
+ if (x1 - x0 == 0)
+ mask_len += 4;
+ else if (x1 - x0 == 1)
+ mask_len += 3;
+ else if (x1 - x0 == 3)
+ mask_len += 2;
+ else if (x1 - x0 == 7)
+ mask_len += 1;
+ }
+
+ return mask_len;
+}
+
+/**
+ * pipapo_match_field() - Match rules against byte ranges
+ * @f: Field including the lookup table
+ * @first_rule: First of associated rules originating from same entry
+ * @rule_count: Number of associated rules
+ * @start: Start of range to be matched
+ * @end: End of range to be matched
+ *
+ * Return: true on match, false otherwise.
+ */
+static bool pipapo_match_field(struct nft_pipapo_field *f,
+ int first_rule, int rule_count,
+ const u8 *start, const u8 *end)
+{
+ u8 right[NFT_PIPAPO_MAX_BYTES] = { 0 };
+ u8 left[NFT_PIPAPO_MAX_BYTES] = { 0 };
+
+ pipapo_get_boundaries(f, first_rule, rule_count, left, right);
+
+ return !memcmp(start, left, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE) &&
+ !memcmp(end, right, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE);
+}
+
+/**
+ * nft_pipapo_remove() - Remove element given key, commit
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ *
+ * Similarly to nft_pipapo_activate(), this is used as a commit operation by
+ * the API, but it's called once per element in the pending transaction, so we
+ * can't implement it as a single commit operation. The closest we can get is
+ * to remove the matched element here, if any, and commit the updated matching
+ * data.
+ */
+static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m = priv->clone;
+ struct nft_pipapo_elem *e = elem->priv;
+ int rules_f0, first_rule = 0;
+ const u8 *data;
+
+ data = (const u8 *)nft_set_ext_key(&e->ext);
+
+ e = pipapo_get(net, set, data, 0);
+ if (IS_ERR(e))
+ return;
+
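+	/* Scan the rules of the first field in chunks that originated from the
+	 * same inserted entry, and check whether a chunk matches the start and
+	 * end key of this element over all fields: if it does, drop the
+	 * corresponding rules and commit the updated working copy.
+	 */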
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ const u8 *match_start, *match_end;
+ struct nft_pipapo_field *f;
+ int i, start, rules_fx;
+
+ match_start = data;
+ match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
+
+ start = first_rule;
+ rules_fx = rules_f0;
+
+ nft_pipapo_for_each_field(f, i, m) {
+ if (!pipapo_match_field(f, start, rules_fx,
+ match_start, match_end))
+ break;
+
+ rulemap[i].to = start;
+ rulemap[i].n = rules_fx;
+
+ rules_fx = f->mt[start].n;
+ start = f->mt[start].to;
+
+ match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ }
+
+ if (i == m->field_count) {
+ priv->dirty = true;
+ pipapo_drop(m, rulemap);
+ pipapo_commit(set);
+ return;
+ }
+
+ first_rule += rules_f0;
+ }
+}
+
+/**
+ * nft_pipapo_walk() - Walk over elements
+ * @ctx: nftables API context
+ * @set: nftables API set representation
+ * @iter: Iterator
+ *
+ * As elements are referenced in the mapping array for the last field, directly
+ * scan that array: there's no need to follow rule mappings from the first
+ * field.
+ */
+static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m;
+ struct nft_pipapo_field *f;
+ int i, r;
+
+ rcu_read_lock();
+ m = rcu_dereference(priv->match);
+
+ if (unlikely(!m))
+ goto out;
+
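+	/* Skip to the last field: elements are only referenced from its
+	 * mapping table.
+	 */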
+ for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
+ ;
+
+ for (r = 0; r < f->rules; r++) {
+ struct nft_pipapo_elem *e;
+ struct nft_set_elem elem;
+
+ if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
+ continue;
+
+ if (iter->count < iter->skip)
+ goto cont;
+
+ e = f->mt[r].e;
+ if (nft_set_elem_expired(&e->ext))
+ goto cont;
+
+ elem.priv = e;
+
+ iter->err = iter->fn(ctx, set, iter, &elem);
+ if (iter->err < 0)
+ goto out;
+
+cont:
+ iter->count++;
+ }
+
+out:
+ rcu_read_unlock();
+}
+
+/**
+ * nft_pipapo_privsize() - Return the size of private data for the set
+ * @nla: netlink attributes, ignored as size doesn't depend on them
+ * @desc: Set description, ignored as size doesn't depend on it
+ *
+ * Return: size of private data for this set implementation, in bytes
+ */
+static u64 nft_pipapo_privsize(const struct nlattr * const nla[],
+ const struct nft_set_desc *desc)
+{
+ return sizeof(struct nft_pipapo);
+}
+
+/**
+ * nft_pipapo_estimate() - Estimate set size, space and lookup complexity
+ * @desc: Set description, element count and field description used here
+ * @features: Flags: NFT_SET_INTERVAL must be set
+ * @est: Storage for estimation data
+ *
+ * The size for this set type can vary dramatically, as it depends on the number
+ * of rules (composing netmasks) the entries expand to. We compute the worst
+ * case here.
+ *
+ * In general, for a non-ranged entry or a single composing netmask, we need
+ * one bit in each of the sixteen NFT_PIPAPO_BUCKETS, for each 4-bit group (that
+ * is, each input bit needs four bits of matching data), plus a bucket in the
+ * mapping table for each field.
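+ *
+ * As a rough worked example, for a concatenation of an IPv4 address and a
+ * port, and assuming an 8-byte union nft_pipapo_map_bucket on a 64-bit build,
+ * the estimate uses ilog2(32) * 2 = 10 rules for the address field, that is
+ * 10 * 16 / 8 = 20 bytes of lookup table plus 80 bytes of mapping table, and
+ * ilog2(16) * 2 = 8 rules for the port field, that is 16 + 64 bytes, for
+ * roughly 180 bytes of matching data per entry.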
+ *
+ * Return: true only for compatible range concatenations
+ */
+static bool nft_pipapo_estimate(const struct nft_set_desc *desc, u32 features,
+ struct nft_set_estimate *est)
+{
+ unsigned long entry_size;
+ int i;
+
+ if (!(features & NFT_SET_INTERVAL) || desc->field_count <= 1)
+ return false;
+
+ for (i = 0, entry_size = 0; i < desc->field_count; i++) {
+ unsigned long rules;
+
+ if (desc->field_len[i] > NFT_PIPAPO_MAX_BYTES)
+ return false;
+
+ /* Worst-case ranges for each concatenated field: each n-bit
+ * field can expand to up to n * 2 rules in each bucket, and
+ * each rule also needs a mapping bucket.
+ */
+ rules = ilog2(desc->field_len[i] * BITS_PER_BYTE) * 2;
+ entry_size += rules * NFT_PIPAPO_BUCKETS / BITS_PER_BYTE;
+ entry_size += rules * sizeof(union nft_pipapo_map_bucket);
+ }
+
+ /* Rules in lookup and mapping tables are needed for each entry */
+ est->size = desc->size * entry_size;
+ if (est->size && div_u64(est->size, desc->size) != entry_size)
+ return false;
+
+ est->size += sizeof(struct nft_pipapo) +
+ sizeof(struct nft_pipapo_match) * 2;
+
+ est->size += sizeof(struct nft_pipapo_field) * desc->field_count;
+
+ est->lookup = NFT_SET_CLASS_O_LOG_N;
+
+ est->space = NFT_SET_CLASS_O_N;
+
+ return true;
+}
+
+/**
+ * nft_pipapo_init() - Initialise data for a set instance
+ * @set: nftables API set representation
+ * @desc: Set description
+ * @nla: netlink attributes
+ *
+ * Validate the number and size of fields passed as NFTA_SET_DESC_CONCAT
+ * netlink attributes, initialise internal set parameters, the current instance
+ * of matching data and a copy for subsequent insertions.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int nft_pipapo_init(const struct nft_set *set,
+ const struct nft_set_desc *desc,
+ const struct nlattr * const nla[])
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m;
+ struct nft_pipapo_field *f;
+ int err, i;
+
+ if (desc->field_count > NFT_PIPAPO_MAX_FIELDS)
+ return -EINVAL;
+
+ m = kmalloc(sizeof(*priv->match) + sizeof(*f) * desc->field_count,
+ GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+
+ m->field_count = desc->field_count;
+ m->bsize_max = 0;
+
+ m->scratch = alloc_percpu(unsigned long *);
+ if (!m->scratch) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(m->scratch, i) = NULL;
+
+ rcu_head_init(&m->rcu);
+
+ nft_pipapo_for_each_field(f, i, m) {
+ f->groups = desc->field_len[i] * NFT_PIPAPO_GROUPS_PER_BYTE;
+ priv->groups += f->groups;
+
+ priv->width += round_up(desc->field_len[i], sizeof(u32));
+
+ f->bsize = 0;
+ f->rules = 0;
+ f->lt = NULL;
+ f->mt = NULL;
+ }
+
+ /* Create an initial clone of matching data for next insertion */
+ priv->clone = pipapo_clone(m);
+ if (IS_ERR(priv->clone)) {
+ err = PTR_ERR(priv->clone);
+ goto out_free;
+ }
+
+ priv->dirty = false;
+
+ rcu_assign_pointer(priv->match, m);
+
+ return 0;
+
+out_free:
+ free_percpu(m->scratch);
+ kfree(m);
+
+ return err;
+}
+
+/**
+ * nft_pipapo_destroy() - Free private data for set and all committed elements
+ * @set: nftables API set representation
+ */
+static void nft_pipapo_destroy(const struct nft_set *set)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m;
+ struct nft_pipapo_field *f;
+ int i, r, cpu;
+
+ m = rcu_dereference_protected(priv->match, true);
+ if (m) {
+ rcu_barrier();
+
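+		/* Free elements, which are only referenced from the mapping
+		 * table of the last field.
+		 */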
+ for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
+ ;
+
+ for (r = 0; r < f->rules; r++) {
+ struct nft_pipapo_elem *e;
+
+ if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
+ continue;
+
+ e = f->mt[r].e;
+
+ nft_set_elem_destroy(set, e, true);
+ }
+
+ for_each_possible_cpu(cpu)
+ kfree(*per_cpu_ptr(m->scratch, cpu));
+ free_percpu(m->scratch);
+
+ pipapo_free_fields(m);
+ kfree(m);
+ priv->match = NULL;
+ }
+
+ if (priv->clone) {
+ for_each_possible_cpu(cpu)
+ kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
+ free_percpu(priv->clone->scratch);
+
+ pipapo_free_fields(priv->clone);
+ kfree(priv->clone);
+ priv->clone = NULL;
+ }
+}
+
+/**
+ * nft_pipapo_gc_init() - Initialise garbage collection
+ * @set: nftables API set representation
+ *
+ * Instead of actually setting up a periodic work for garbage collection, as
+ * this operation requires a swap of matching data with the working copy, we'll
+ * do that opportunistically with other commit operations once the interval
+ * has elapsed, so we just need to set the current jiffies timestamp here.
+ */
+static void nft_pipapo_gc_init(const struct nft_set *set)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+
+ priv->last_gc = jiffies;
+}
+
+struct nft_set_type nft_set_pipapo_type __read_mostly = {
+ .owner = THIS_MODULE,
+ .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
+ NFT_SET_TIMEOUT,
+ .ops = {
+ .lookup = nft_pipapo_lookup,
+ .insert = nft_pipapo_insert,
+ .activate = nft_pipapo_activate,
+ .deactivate = nft_pipapo_deactivate,
+ .flush = nft_pipapo_flush,
+ .remove = nft_pipapo_remove,
+ .walk = nft_pipapo_walk,
+ .get = nft_pipapo_get,
+ .privsize = nft_pipapo_privsize,
+ .estimate = nft_pipapo_estimate,
+ .init = nft_pipapo_init,
+ .destroy = nft_pipapo_destroy,
+ .gc_init = nft_pipapo_gc_init,
+ .elemsize = offsetof(struct nft_pipapo_elem, ext),
+ },
+};
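+
+/* Example usage from nftables userspace (assuming a recent nft with support
+ * for concatenated ranges; table and set names are illustrative), exercising
+ * this set type with an IPv4 address and port concatenation:
+ *
+ *	nft add table inet filter
+ *	nft add set inet filter test \
+ *		'{ type ipv4_addr . inet_service ; flags interval ; }'
+ *	nft add element inet filter test \
+ *		'{ 192.168.1.0-192.168.2.1 . 10-20 }'
+ */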
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index a9f804f7a04a..5000b938ab1e 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -466,6 +466,9 @@ static void nft_rbtree_destroy(const struct nft_set *set)
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
+ if (desc->field_count > 1)
+ return false;
+
if (desc->size)
est->size = sizeof(struct nft_rbtree) +
desc->size * sizeof(struct nft_rbtree_elem);
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 5284fcf16be7..764e88682a81 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -248,8 +248,9 @@ static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
}
static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
- [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
+ [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
};
@@ -338,6 +339,8 @@ static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] =
[NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
[NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
[NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
+ [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
+ [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
[NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
};
@@ -445,10 +448,15 @@ static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
if (!nest)
return -1;
- if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 ||
- nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 ||
- nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label))
+ if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
+ &info->key.u.ipv6.src) < 0 ||
+ nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
+ &info->key.u.ipv6.dst) < 0 ||
+ nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
+ info->key.label)) {
+ nla_nest_cancel(skb, nest);
return -1;
+ }
nla_nest_end(skb, nest);
} else {
@@ -456,9 +464,13 @@ static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
if (!nest)
return -1;
- if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 ||
- nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0)
+ if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
+ info->key.u.ipv4.src) < 0 ||
+ nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
+ info->key.u.ipv4.dst) < 0) {
+ nla_nest_cancel(skb, nest);
return -1;
+ }
nla_nest_end(skb, nest);
}
@@ -470,42 +482,58 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
struct nft_tunnel_obj *priv)
{
struct nft_tunnel_opts *opts = &priv->opts;
- struct nlattr *nest;
+ struct nlattr *nest, *inner;
nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
if (!nest)
return -1;
if (opts->flags & TUNNEL_VXLAN_OPT) {
+ inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
+ if (!inner)
+ goto failure;
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
htonl(opts->u.vxlan.gbp)))
- return -1;
+ goto inner_failure;
+ nla_nest_end(skb, inner);
} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
+ inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
+ if (!inner)
+ goto failure;
+ if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
+ htonl(opts->u.erspan.version)))
+ goto inner_failure;
switch (opts->u.erspan.version) {
case ERSPAN_VERSION:
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
opts->u.erspan.u.index))
- return -1;
+ goto inner_failure;
break;
case ERSPAN_VERSION2:
if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
get_hwid(&opts->u.erspan.u.md2)) ||
nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
opts->u.erspan.u.md2.dir))
- return -1;
+ goto inner_failure;
break;
}
+ nla_nest_end(skb, inner);
}
nla_nest_end(skb, nest);
-
return 0;
+
+inner_failure:
+ nla_nest_cancel(skb, inner);
+failure:
+ nla_nest_cancel(skb, nest);
+ return -1;
}
static int nft_tunnel_ports_dump(struct sk_buff *skb,
struct ip_tunnel_info *info)
{
- if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
- nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
+ if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
+ nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
return -1;
return 0;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index ce70c2576bb2..cd2b034eef59 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -939,14 +939,14 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
*
* @size: number of entries
*
- * Return: NULL or kmalloc'd or vmalloc'd array
+ * Return: NULL or zeroed kmalloc'd or vmalloc'd array
*/
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
return NULL;
- return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
+ return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
@@ -1551,6 +1551,9 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
struct nf_mttg_trav *trav = seq->private;
+ if (ppos != NULL)
+ ++(*ppos);
+
switch (trav->class) {
case MTTG_TRAV_INIT:
trav->class = MTTG_TRAV_NFP_UNSPEC;
@@ -1576,9 +1579,6 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
default:
return NULL;
}
-
- if (ppos != NULL)
- ++*ppos;
return trav;
}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index ced3fc8fad7c..8c835ad63729 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -36,6 +36,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
+#include <linux/refcount.h>
#include <uapi/linux/netfilter/xt_hashlimit.h>
#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
@@ -114,7 +115,7 @@ struct dsthash_ent {
struct xt_hashlimit_htable {
struct hlist_node node; /* global list of all htables */
- int use;
+ refcount_t use;
u_int8_t family;
bool rnd_initialized;
@@ -315,7 +316,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
for (i = 0; i < hinfo->cfg.size; i++)
INIT_HLIST_HEAD(&hinfo->hash[i]);
- hinfo->use = 1;
+ refcount_set(&hinfo->use, 1);
hinfo->count = 0;
hinfo->family = family;
hinfo->rnd_initialized = false;
@@ -357,21 +358,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
return 0;
}
-static bool select_all(const struct xt_hashlimit_htable *ht,
- const struct dsthash_ent *he)
-{
- return true;
-}
-
-static bool select_gc(const struct xt_hashlimit_htable *ht,
- const struct dsthash_ent *he)
-{
- return time_after_eq(jiffies, he->expires);
-}
-
-static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
- bool (*select)(const struct xt_hashlimit_htable *ht,
- const struct dsthash_ent *he))
+static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
{
unsigned int i;
@@ -381,7 +368,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
spin_lock_bh(&ht->lock);
hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
- if ((*select)(ht, dh))
+ if (time_after_eq(jiffies, dh->expires) || select_all)
dsthash_free(ht, dh);
}
spin_unlock_bh(&ht->lock);
@@ -395,7 +382,7 @@ static void htable_gc(struct work_struct *work)
ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
- htable_selective_cleanup(ht, select_gc);
+ htable_selective_cleanup(ht, false);
queue_delayed_work(system_power_efficient_wq,
&ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
@@ -415,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
remove_proc_entry(hinfo->name, parent);
}
-static void htable_destroy(struct xt_hashlimit_htable *hinfo)
-{
- cancel_delayed_work_sync(&hinfo->gc_work);
- htable_remove_proc_entry(hinfo);
- htable_selective_cleanup(hinfo, select_all);
- kfree(hinfo->name);
- vfree(hinfo);
-}
-
static struct xt_hashlimit_htable *htable_find_get(struct net *net,
const char *name,
u_int8_t family)
@@ -434,7 +412,7 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
if (!strcmp(name, hinfo->name) &&
hinfo->family == family) {
- hinfo->use++;
+ refcount_inc(&hinfo->use);
return hinfo;
}
}
@@ -443,12 +421,16 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
static void htable_put(struct xt_hashlimit_htable *hinfo)
{
- mutex_lock(&hashlimit_mutex);
- if (--hinfo->use == 0) {
+ if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
hlist_del(&hinfo->node);
- htable_destroy(hinfo);
+ htable_remove_proc_entry(hinfo);
+ mutex_unlock(&hashlimit_mutex);
+
+ cancel_delayed_work_sync(&hinfo->gc_work);
+ htable_selective_cleanup(hinfo, true);
+ kfree(hinfo->name);
+ vfree(hinfo);
}
- mutex_unlock(&hashlimit_mutex);
}
/* The algorithm used is the Simple Token Bucket Filter (TBF)
@@ -851,6 +833,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
}
+#define HASHLIMIT_MAX_SIZE 1048576
+
static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
struct xt_hashlimit_htable **hinfo,
struct hashlimit_cfg3 *cfg,
@@ -861,6 +845,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
if (cfg->gc_interval == 0 || cfg->expire == 0)
return -EINVAL;
+ if (cfg->size > HASHLIMIT_MAX_SIZE) {
+ cfg->size = HASHLIMIT_MAX_SIZE;
+ pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
+ }
+ if (cfg->max > HASHLIMIT_MAX_SIZE) {
+ cfg->max = HASHLIMIT_MAX_SIZE;
+ pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
+ }
if (par->family == NFPROTO_IPV4) {
if (cfg->srcmask > 32 || cfg->dstmask > 32)
return -EINVAL;
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 781e0b482189..225a7ab6d79a 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -103,7 +103,7 @@ static DEFINE_SPINLOCK(recent_lock);
static DEFINE_MUTEX(recent_mutex);
#ifdef CONFIG_PROC_FS
-static const struct file_operations recent_mt_fops;
+static const struct proc_ops recent_mt_proc_ops;
#endif
static u_int32_t hash_rnd __read_mostly;
@@ -405,7 +405,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
goto out;
}
pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
- &recent_mt_fops, t);
+ &recent_mt_proc_ops, t);
if (pde == NULL) {
recent_table_free(t);
ret = -ENOMEM;
@@ -492,12 +492,12 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
const struct recent_entry *e = v;
const struct list_head *head = e->list.next;
+ (*pos)++;
while (head == &t->iphash[st->bucket]) {
if (++st->bucket >= ip_list_hash_size)
return NULL;
head = t->iphash[st->bucket].next;
}
- (*pos)++;
return list_entry(head, struct recent_entry, list);
}
@@ -616,13 +616,12 @@ recent_mt_proc_write(struct file *file, const char __user *input,
return size + 1;
}
-static const struct file_operations recent_mt_fops = {
- .open = recent_seq_open,
- .read = seq_read,
- .write = recent_mt_proc_write,
- .release = seq_release_private,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
+static const struct proc_ops recent_mt_proc_ops = {
+ .proc_open = recent_seq_open,
+ .proc_read = seq_read,
+ .proc_write = recent_mt_proc_write,
+ .proc_release = seq_release_private,
+ .proc_lseek = seq_lseek,
};
static int __net_init recent_proc_net_init(struct net *net)