author	Pablo Neira Ayuso <pablo@netfilter.org>	2025-04-22 21:52:43 +0200
committer	Pablo Neira Ayuso <pablo@netfilter.org>	2025-05-05 13:17:32 +0200
commit	4c5c6aa9967dbe55bd017bb509885928d0f31206 (patch)
tree	58e5f7317807a6051869987a9474d73a5f8571f2
parent	netfilter: nf_conntrack: speed up reads from nf_conntrack proc file (diff)
netfilter: nft_set_pipapo: prevent overflow in lookup table allocation
When calculating the lookup table size, ensure the following multiplication does not overflow:

- desc->field_len[] maximum value is U8_MAX, multiplied by NFT_PIPAPO_GROUPS_PER_BYTE(f), which can be 2, worst case.
- NFT_PIPAPO_BUCKETS(f->bb) is 2^8, worst case.
- sizeof(unsigned long), from sizeof(*f->lt), lt in struct nft_pipapo_field.

Then, use check_mul_overflow() to multiply by the bucket size, and use check_add_overflow() to add the AVX2 alignment headroom (if needed). Finally, add the lt_calculate_size() helper and use it to consolidate this.

While at it, replace the leftover GFP_KERNEL allocation in pipapo_resize() with GFP_KERNEL_ACCOUNT, for consistency.

Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
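To make the bounds concrete: the base term is at most 510 groups (U8_MAX * 2) * 256 buckets (2^8) * 8 bytes per long = 1,044,480 bytes, which fits even in a 32-bit ssize_t; it is the subsequent multiplication by the bucket size that can wrap. Below is a minimal, self-contained userspace sketch of the same pattern, not the kernel code: __builtin_mul_overflow()/__builtin_add_overflow() are the compiler builtins that check_mul_overflow()/check_add_overflow() wrap, and BUCKETS/ALIGN_HEADROOM are illustrative stand-ins for the NFT_PIPAPO_* macros, not their kernel definitions.

#include <limits.h>
#include <stdio.h>
#include <sys/types.h>

#define BUCKETS(bb)	(1U << (bb))	/* stand-in for NFT_PIPAPO_BUCKETS(bb) */
#define ALIGN_HEADROOM	32		/* stand-in for NFT_PIPAPO_ALIGN_HEADROOM */

static ssize_t lt_size_sketch(unsigned int groups, unsigned int bb,
			      unsigned int bsize)
{
	ssize_t ret = groups * BUCKETS(bb) * sizeof(long);

	/* The kernel's check_mul_overflow()/check_add_overflow() wrap
	 * these builtins; they return true if the result wrapped.
	 */
	if (__builtin_mul_overflow(ret, bsize, &ret))
		return -1;
	if (__builtin_add_overflow(ret, ALIGN_HEADROOM, &ret))
		return -1;
	if (ret > INT_MAX)
		return -1;

	return ret;
}

int main(void)
{
	printf("%zd\n", lt_size_sketch(510, 8, 4));	   /* fits: 4177952 */
	printf("%zd\n", lt_size_sketch(510, 8, UINT_MAX)); /* rejected: -1 */
	return 0;
}

On 64-bit, the multiplication itself cannot wrap given these bounds (1,044,480 * UINT_MAX still fits in 64 bits), so the ret > INT_MAX cap is what actually rejects oversized tables there; on 32-bit, the mul/add checks do the work.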
 net/netfilter/nft_set_pipapo.c | 58
 1 file changed, 44 insertions(+), 14 deletions(-)
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 7be342b495f5..0529e4ef7520 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -683,6 +683,30 @@ out_free:
 	return 0;
 }
 
+/**
+ * lt_calculate_size() - Get storage size for lookup table with overflow check
+ * @groups:	Amount of bit groups
+ * @bb:		Number of bits grouped together in lookup table buckets
+ * @bsize:	Size of each bucket in lookup table, in longs
+ *
+ * Return: allocation size including alignment overhead, negative on overflow
+ */
+static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb,
+				 unsigned int bsize)
+{
+	ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long);
+
+	if (check_mul_overflow(ret, bsize, &ret))
+		return -1;
+	if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret))
+		return -1;
+	if (ret > INT_MAX)
+		return -1;
+
+	return ret;
+}
+
 /**
  * pipapo_resize() - Resize lookup or mapping table, or both
  * @f:		Field containing lookup and mapping tables
@@ -701,6 +725,7 @@ static int pipapo_resize(struct nft_pipapo_field *f,
 	long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
 	unsigned int new_bucket_size, copy;
 	int group, bucket, err;
+	ssize_t lt_size;
 
 	if (rules >= NFT_PIPAPO_RULE0_MAX)
 		return -ENOSPC;
@@ -719,10 +744,11 @@ static int pipapo_resize(struct nft_pipapo_field *f,
 	else
 		copy = new_bucket_size;
 
-	new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
-			  new_bucket_size * sizeof(*new_lt) +
-			  NFT_PIPAPO_ALIGN_HEADROOM,
-			  GFP_KERNEL);
+	lt_size = lt_calculate_size(f->groups, f->bb, new_bucket_size);
+	if (lt_size < 0)
+		return -ENOMEM;
+
+	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
 	if (!new_lt)
 		return -ENOMEM;
 
@@ -907,7 +933,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
 {
 	unsigned int groups, bb;
 	unsigned long *new_lt;
-	size_t lt_size;
+	ssize_t lt_size;
 
 	lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
 		  sizeof(*f->lt);
@@ -917,15 +943,17 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
 		groups = f->groups * 2;
 		bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
 
-		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
-			  sizeof(*f->lt);
+		lt_size = lt_calculate_size(groups, bb, f->bsize);
+		if (lt_size < 0)
+			return;
 	} else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
 		   lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
 		groups = f->groups / 2;
 		bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
 
-		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
-			  sizeof(*f->lt);
+		lt_size = lt_calculate_size(groups, bb, f->bsize);
+		if (lt_size < 0)
+			return;
 
 		/* Don't increase group width if the resulting lookup table size
 		 * would exceed the upper size threshold for a "small" set.
@@ -936,7 +964,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
 		return;
 	}
 
-	new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
+	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
 	if (!new_lt)
 		return;
 
@@ -1451,13 +1479,15 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
 
 	for (i = 0; i < old->field_count; i++) {
 		unsigned long *new_lt;
+		ssize_t lt_size;
 
 		memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
 
-		new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
-				  src->bsize * sizeof(*dst->lt) +
-				  NFT_PIPAPO_ALIGN_HEADROOM,
-				  GFP_KERNEL_ACCOUNT);
+		lt_size = lt_calculate_size(src->groups, src->bb, src->bsize);
+		if (lt_size < 0)
+			goto out_lt;
+
+		new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
 		if (!new_lt)
 			goto out_lt;
 
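All three call sites converge on the same shape: compute the size through the checked helper, bail out before touching the allocator, then pass the validated value on. Schematically, as a sketch reusing lt_size_sketch() from above, with calloc() standing in for kvzalloc(..., GFP_KERNEL_ACCOUNT):

#include <stdlib.h>

/* Sketch of the caller pattern shared by pipapo_resize(),
 * pipapo_lt_bits_adjust() and pipapo_clone(): validate the size
 * first, then hand the checked value to the allocator.
 */
static long *alloc_lt_sketch(unsigned int groups, unsigned int bb,
			     unsigned int bsize)
{
	ssize_t lt_size = lt_size_sketch(groups, bb, bsize);

	if (lt_size < 0)
		return NULL;	/* kernel callers bail out here */

	return calloc(1, (size_t)lt_size);	/* kvzalloc() stand-in */
}

The error handling differs by caller: pipapo_resize() and pipapo_clone() propagate failure (-ENOMEM / goto out_lt), while pipapo_lt_bits_adjust() simply returns and keeps the current group width, since the width adjustment is only an optimization.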