author		Pablo Neira Ayuso <pablo@netfilter.org>	2022-01-09 17:11:21 +0100
committer	Pablo Neira Ayuso <pablo@netfilter.org>	2022-01-09 23:35:17 +0100
commit		12e4ecfa244be2f117ef5304d2d866b65e70bff3 (patch)
tree		1deb284a2e6bfd58e9ff3fe6a7153dbd05615c43 /include/net
parent		netfilter: nf_tables: add NFT_REG32_NUM (diff)
download	linux-dev-12e4ecfa244be2f117ef5304d2d866b65e70bff3.tar.xz
		linux-dev-12e4ecfa244be2f117ef5304d2d866b65e70bff3.zip
netfilter: nf_tables: add register tracking infrastructure
This patch adds new infrastructure to skip redundant selector store operations on the same register, to achieve a performance boost in the packet path. This is particularly noticeable in pure linear rulesets, but it also helps rulesets that already rely heavily on maps to avoid linear ruleset inspection.

The idea is to keep data about the store operations most recently performed on each register, so that it can be reused by cmp and lookup expressions. This infrastructure allows for dynamic ruleset updates, since the ruleset blob reduction happens in the kernel.

Userspace still needs to be updated to maximize register utilization, so that register data is reused more often and the number of store-on-register operations is reduced.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
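For illustration, here is a minimal sketch of what a ->reduce() callback for a selector expression might look like on top of this infrastructure. struct nft_regs_track and nft_expr_priv() are the real type/helper from nf_tables.h; the nft_my_selector structure, its fields and nft_my_selector_reduce() are hypothetical names used only for this example, not part of the patch.

/* Hypothetical per-expression private data (illustrative only). */
struct nft_my_selector {
	u32	offset;
	u32	len;
	u8	dreg;
};

static bool nft_my_selector_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	const struct nft_my_selector *priv = nft_expr_priv(expr);
	const struct nft_my_selector *prev;

	/* Nothing tracked on this register yet, or the tracked store
	 * came from a different expression type: remember this one.
	 */
	if (!track->regs[priv->dreg].selector ||
	    track->regs[priv->dreg].selector->ops != expr->ops) {
		track->regs[priv->dreg].selector = expr;
		return false;
	}

	prev = nft_expr_priv(track->regs[priv->dreg].selector);

	/* The register already holds the same selector data: report
	 * this store as redundant so it can be skipped.
	 */
	if (prev->offset == priv->offset && prev->len == priv->len)
		return true;

	/* Different data: update the tracked selector for this register. */
	track->regs[priv->dreg].selector = expr;
	return false;
}

Returning true tells the core that evaluating this expression again would only re-store data that is already in the register, so the store can be elided from the packet path.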
Diffstat (limited to 'include/net')
-rw-r--r--	include/net/netfilter/nf_tables.h	12
1 file changed, 12 insertions, 0 deletions
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 515e5db97e01..1c37ce61daea 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -122,6 +122,16 @@ struct nft_regs {
 	};
 };
 
+struct nft_regs_track {
+	struct {
+		const struct nft_expr		*selector;
+		const struct nft_expr		*bitwise;
+	} regs[NFT_REG32_NUM];
+
+	const struct nft_expr			*cur;
+	const struct nft_expr			*last;
+};
+
 /* Store/load an u8, u16 or u64 integer to/from the u32 data register.
  *
  * Note, when using concatenations, register allocation happens at 32-bit
@@ -886,6 +896,8 @@ struct nft_expr_ops {
 	int				(*validate)(const struct nft_ctx *ctx,
 						    const struct nft_expr *expr,
 						    const struct nft_data **data);
+	bool				(*reduce)(struct nft_regs_track *track,
+						  const struct nft_expr *expr);
 	bool				(*gc)(struct net *net,
 					      const struct nft_expr *expr);
 	int				(*offload)(struct nft_offload_ctx *ctx,
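As a rough sketch, the tracker could be driven like this when a chain is walked to build the reduced rule blob. The exact caller is not part of this hunk; nft_rule_for_each_expr() is an existing helper in nf_tables.h, while the loop body and the blob-copy step are illustrative assumptions.

	struct nft_regs_track track = {};
	const struct nft_expr *expr, *last;

	nft_rule_for_each_expr(expr, last, rule) {
		track.cur = expr;

		/* A reduce callback that reports the store as redundant
		 * lets the core skip copying this expression into the
		 * blob that the packet path evaluates.
		 */
		if (expr->ops->reduce && expr->ops->reduce(&track, expr))
			continue;

		/* ... otherwise copy expr into the rule blob here ... */
	}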