Diffstat (limited to 'drivers/net/wireguard/send.c')
-rw-r--r--  drivers/net/wireguard/send.c |  98 ---
1 file changed, 0 insertions(+), 98 deletions(-)
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 3e030d614df5..df070928d569 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -141,81 +141,6 @@ static void keep_key_fresh(struct wg_peer *peer)
wg_packet_send_queued_handshake_initiation(peer, false);
}
-static unsigned int calculate_skb_padding(struct sk_buff *skb)
-{
- unsigned int padded_size, last_unit = skb->len;
-
- if (unlikely(!PACKET_CB(skb)->mtu))
- return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;
-
- /* We do this modulo business with the MTU, just in case the networking
- * layer gives us a packet that's bigger than the MTU. In that case, we
- * wouldn't want the final subtraction to overflow in the case of the
- * padded_size being clamped. Fortunately, that's very rarely the case,
- * so we optimize for that not happening.
- */
- if (unlikely(last_unit > PACKET_CB(skb)->mtu))
- last_unit %= PACKET_CB(skb)->mtu;
-
- padded_size = min(PACKET_CB(skb)->mtu,
- ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
- return padded_size - last_unit;
-}
-
-static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
-{
- unsigned int padding_len, plaintext_len, trailer_len;
- struct scatterlist sg[MAX_SKB_FRAGS + 8];
- struct message_data *header;
- struct sk_buff *trailer;
- int num_frags;
-
- /* Calculate lengths. */
- padding_len = calculate_skb_padding(skb);
- trailer_len = padding_len + noise_encrypted_len(0);
- plaintext_len = skb->len + padding_len;
-
- /* Expand data section to have room for padding and auth tag. */
- num_frags = skb_cow_data(skb, trailer_len, &trailer);
- if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
- return false;
-
- /* Set the padding to zeros, and make sure it and the auth tag are part
- * of the skb.
- */
- memset(skb_tail_pointer(trailer), 0, padding_len);
-
- /* Expand head section to have room for our header and the network
- * stack's headers.
- */
- if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
- return false;
-
- /* Finalize checksum calculation for the inner packet, if required. */
- if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(skb)))
- return false;
-
- /* Only after checksumming can we safely add on the padding at the end
- * and the header.
- */
- skb_set_inner_network_header(skb, 0);
- header = (struct message_data *)skb_push(skb, sizeof(*header));
- header->header.type = cpu_to_le32(MESSAGE_DATA);
- header->key_idx = keypair->remote_index;
- header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
- pskb_put(skb, trailer, trailer_len);
-
- /* Now we can encrypt the scattergather segments */
- sg_init_table(sg, num_frags);
- if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
- noise_encrypted_len(plaintext_len)) <= 0)
- return false;
- return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
- PACKET_CB(skb)->nonce,
- keypair->sending.key);
-}
-
void wg_packet_send_keepalive(struct wg_peer *peer)
{
struct sk_buff *skb;
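
The calculate_skb_padding() arithmetic removed above is self-contained, so its clamping behavior can be shown outside the kernel. Below is a minimal userspace sketch, not the driver's code: it assumes MESSAGE_PADDING_MULTIPLE is 16 (its value in the driver's messages.h) and substitutes a plain align_up() helper for the kernel's ALIGN() and min() macros.

/* Hypothetical standalone sketch of the padding arithmetic above. */
#include <stdio.h>

#define MESSAGE_PADDING_MULTIPLE 16

/* Round x up to the next multiple of a, for power-of-two a (what the
 * kernel's ALIGN() does). */
static unsigned int align_up(unsigned int x, unsigned int a)
{
        return (x + a - 1) & ~(a - 1);
}

static unsigned int padding_for(unsigned int len, unsigned int mtu)
{
        unsigned int padded;

        if (mtu == 0)      /* no MTU recorded (e.g. keepalives): align only */
                return align_up(len, MESSAGE_PADDING_MULTIPLE) - len;
        if (len > mtu)     /* oversized packet: keep the subtraction below
                            * from underflowing once padded is clamped */
                len %= mtu;
        padded = align_up(len, MESSAGE_PADDING_MULTIPLE);
        if (padded > mtu)  /* clamp: never pad past the MTU */
                padded = mtu;
        return padded - len;
}

int main(void)
{
        /* 1419-byte payload, 1420-byte MTU: aligning to 16 would give 1424,
         * which the clamp cuts back to 1420, so just 1 byte of padding. */
        printf("%u\n", padding_for(1419, 1420)); /* -> 1 */
        printf("%u\n", padding_for(100, 1420));  /* -> 12 (100 pads to 112) */
        return 0;
}
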
@@ -284,29 +209,6 @@ void wg_packet_tx_worker(struct work_struct *work)
}
}
-void wg_packet_encrypt_worker(struct work_struct *work)
-{
- struct crypt_queue *queue = container_of(work, struct multicore_worker,
- work)->ptr;
- struct sk_buff *first, *skb, *next;
-
- while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
- enum packet_state state = PACKET_STATE_CRYPTED;
-
- skb_list_walk_safe(first, skb, next) {
- if (likely(encrypt_packet(skb,
- PACKET_CB(first)->keypair))) {
- wg_reset_packet(skb);
- } else {
- state = PACKET_STATE_DEAD;
- break;
- }
- }
- wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
- state);
- }
-}
-
static void wg_packet_create_data(struct sk_buff *first)
{
struct wg_peer *peer = PACKET_PEER(first);
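
The removed wg_packet_encrypt_worker() drains batches of packets from a multicore ring and gives each batch a single verdict: if any segment fails to encrypt, the whole batch is marked dead, so the ordered per-peer tx_queue never sees a partially encrypted GSO train. The following is a rough userspace sketch of just that control flow; every type and helper in it is a stand-in, not the kernel's API.

/* Hypothetical sketch of the batch-verdict pattern in the worker above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum packet_state { PACKET_STATE_CRYPTED, PACKET_STATE_DEAD };

struct pkt {
        struct pkt *next;   /* next packet in the same batch (GSO segments) */
        bool encrypt_ok;    /* stand-in for encrypt_packet() succeeding */
};

/* Stand-in for wg_queue_enqueue_per_peer(): the real code hands the batch
 * to the peer's tx_queue together with its final state. */
static void forward_batch(struct pkt *first, enum packet_state state)
{
        printf("batch %p -> %s\n", (void *)first,
               state == PACKET_STATE_CRYPTED ? "CRYPTED" : "DEAD");
}

static void encrypt_worker(struct pkt **batches, size_t n)
{
        for (size_t i = 0; i < n; i++) {  /* the ptr_ring_consume_bh() loop */
                struct pkt *first = batches[i];
                enum packet_state state = PACKET_STATE_CRYPTED;

                for (struct pkt *skb = first; skb; skb = skb->next) {
                        if (!skb->encrypt_ok) { /* encrypt_packet() failed */
                                state = PACKET_STATE_DEAD;
                                break;          /* poison the whole batch */
                        }
                }
                forward_batch(first, state);
        }
}

int main(void)
{
        struct pkt ok2 = { NULL, true }, ok1 = { &ok2, true };
        struct pkt bad = { NULL, false };
        struct pkt *batches[] = { &ok1, &bad };

        encrypt_worker(batches, 2); /* first batch CRYPTED, second DEAD */
        return 0;
}
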