summaryrefslogtreecommitdiffstatshomepage
path: root/src/data.c
diff options
context:
space:
mode:
author Jason A. Donenfeld <Jason@zx2c4.com> 2017-04-09 04:56:00 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com> 2017-04-09 04:56:00 +0200
commit 65a56f7b0978b3e8addf337463baf074254f8581 (patch)
tree 59a2993d0669a51b512c4c5d8d3af48b2f15e692 /src/data.c
parent debug: cleanups (diff)
download wireguard-monolithic-historical-65a56f7b0978b3e8addf337463baf074254f8581.tar.xz
wireguard-monolithic-historical-65a56f7b0978b3e8addf337463baf074254f8581.zip
data: alloca is actually as dangerous as they say
It turns out that calling alloca from an inline function means that the memory isn't ever deallocated until the caller function exits, which means we were using tons of stack space for every iteration of the call. So, we hard code the sg array. While 128 seems like a reasonable number, we actually wind up using "MAX_SKB_FRAGS * 2 + 1". An skb has its data segment, so that's 1. Then it has its frags, which are MAX_SKB_FRAGS at max. Then it has its frag list, which, so far as I can tell, are potentially unbounded. So we just hope it's no more than MAX_SKB_FRAGS, and so we plan for at most two of those.
Diffstat (limited to 'src/data.c')
-rw-r--r-- src/data.c | 10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/src/data.c b/src/data.c
index f188be5..63cd60e 100644
--- a/src/data.c
+++ b/src/data.c
@@ -121,7 +121,7 @@ static inline void skb_reset(struct sk_buff *skb)
static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypair, bool have_simd)
{
- struct scatterlist *sg;
+ struct scatterlist sg[MAX_SKB_FRAGS * 2 + 1];
struct message_data *header;
unsigned int padding_len, plaintext_len, trailer_len;
int num_frags;
@@ -137,7 +137,7 @@ static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypai
/* Expand data section to have room for padding and auth tag */
num_frags = skb_cow_data(skb, trailer_len, &trailer);
- if (unlikely(num_frags < 0 || num_frags > 128))
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
return false;
/* Set the padding to zeros, and make sure it and the auth tag are part of the skb */
@@ -159,7 +159,6 @@ static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypai
pskb_put(skb, trailer, trailer_len);
/* Now we can encrypt the scattergather segments */
- sg = __builtin_alloca(num_frags * sizeof(struct scatterlist)); /* bounded to 128 */
sg_init_table(sg, num_frags);
if (skb_to_sgvec(skb, sg, sizeof(struct message_data), noise_encrypted_len(plaintext_len)) <= 0)
return false;
@@ -168,7 +167,7 @@ static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypai
static inline bool skb_decrypt(struct sk_buff *skb, struct noise_symmetric_key *key)
{
- struct scatterlist *sg;
+ struct scatterlist sg[MAX_SKB_FRAGS * 2 + 1];
struct sk_buff *trailer;
int num_frags;
@@ -183,9 +182,8 @@ static inline bool skb_decrypt(struct sk_buff *skb, struct noise_symmetric_key *
PACKET_CB(skb)->nonce = le64_to_cpu(((struct message_data *)skb->data)->counter);
skb_pull(skb, sizeof(struct message_data));
num_frags = skb_cow_data(skb, 0, &trailer);
- if (unlikely(num_frags < 0 || num_frags > 128))
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
return false;
- sg = __builtin_alloca(num_frags * sizeof(struct scatterlist)); /* bounded to 128 */
sg_init_table(sg, num_frags);
if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)