diff options
author | Jason A. Donenfeld <Jason@zx2c4.com> | 2017-10-05 23:39:07 +0200 |
---|---|---|
committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2017-10-05 23:40:19 +0200 |
commit | 970d20530828f4fd5473b4c382dfbacd7ddc13cd (patch) | |
tree | 19e6444e36ff945ea4b99a75b6fe728ee7d01f61 /src/send.c | |
parent | compat: macro rewrite netlink instead of cluttering (diff) | |
download | wireguard-monolithic-historical-970d20530828f4fd5473b4c382dfbacd7ddc13cd.tar.xz wireguard-monolithic-historical-970d20530828f4fd5473b4c382dfbacd7ddc13cd.zip |
queueing: cleanup skb_padding
Diffstat (limited to 'src/send.c')
-rw-r--r-- | src/send.c | 14 |
1 file changed, 14 insertions, 0 deletions
@@ -105,6 +105,20 @@ static inline void keep_key_fresh(struct wireguard_peer *peer) packet_send_queued_handshake_initiation(peer, false); } +static inline unsigned int skb_padding(struct sk_buff *skb) +{ + /* We do this modulo business with the MTU, just in case the networking layer + * gives us a packet that's bigger than the MTU. Since we support GSO, this + * isn't strictly neccessary, but it's better to be cautious here, especially + * if that code ever changes. */ + unsigned int last_unit = skb->len % skb->dev->mtu; + unsigned int padded_size = (last_unit + MESSAGE_PADDING_MULTIPLE - 1) & ~(MESSAGE_PADDING_MULTIPLE - 1); + + if (padded_size > skb->dev->mtu) + padded_size = skb->dev->mtu; + return padded_size - last_unit; +} + static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypair, bool have_simd) { struct scatterlist sg[MAX_SKB_FRAGS * 2 + 1]; |