summary refs log tree commit diff stats homepage
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2018-04-16 02:45:30 +0200
committerJason A. Donenfeld <Jason@zx2c4.com>2018-04-16 02:45:30 +0200
commit13225fce137b6bdfc07bbda28e98bdb4bc7ca6ae (patch)
tree1eec06138152de2e8174bce2c613edffaf7c0e0c
parentsend: account for route-based MTU (diff)
downloadwireguard-monolithic-historical-13225fce137b6bdfc07bbda28e98bdb4bc7ca6ae.tar.xz
wireguard-monolithic-historical-13225fce137b6bdfc07bbda28e98bdb4bc7ca6ae.zip
send: simplify skb_padding with nice macro
-rw-r--r-- src/send.c | 7
1 files changed, 3 insertions, 4 deletions
diff --git a/src/send.c b/src/send.c
index 9c9f694..9b1aec0 100644
--- a/src/send.c
+++ b/src/send.c
@@ -112,12 +112,11 @@ static inline void keep_key_fresh(struct wireguard_peer *peer)
static inline unsigned int skb_padding(struct sk_buff *skb)
{
/* We do this modulo business with the MTU, just in case the networking layer
- * gives us a packet that's bigger than the MTU. Since we support GSO, this
- * isn't strictly neccessary, but it's better to be cautious here, especially
- * if that code ever changes.
+ * gives us a packet that's bigger than the MTU. In that case, we wouldn't want
+ * the final subtraction to overflow in the case of the padded_size being clamped.
*/
unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
- unsigned int padded_size = (last_unit + MESSAGE_PADDING_MULTIPLE - 1) & ~(MESSAGE_PADDING_MULTIPLE - 1);
+ unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
if (padded_size > PACKET_CB(skb)->mtu)
padded_size = PACKET_CB(skb)->mtu;