Diffstat (limited to 'src/packets.h')
 -rw-r--r--  src/packets.h | 47 +++++++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/src/packets.h b/src/packets.h
index 6601ef6..0b8fe1e 100644
--- a/src/packets.h
+++ b/src/packets.h
@@ -22,30 +22,30 @@ struct packet_cb {
};
#define PACKET_CB(skb) ((struct packet_cb *)skb->cb)

-/* receive.c */
-void packet_receive(struct wireguard_device *wg, struct sk_buff *skb);
-void packet_process_queued_handshake_packets(struct work_struct *work);
-void packet_consume_data_done(struct sk_buff *skb, struct wireguard_peer *peer, struct endpoint *endpoint, bool used_new_key);
+/* data.c */
+int init_crypt_ctx_cache(void);
+void deinit_crypt_ctx_cache(void);
+void packet_send_worker(struct work_struct *work);
+void packet_encrypt_worker(struct work_struct *work);
+void packet_init_worker(struct work_struct *work);
+void packet_create_data(struct wireguard_peer *peer, struct sk_buff_head *packets);
void packet_receive_worker(struct work_struct *work);
void packet_decrypt_worker(struct work_struct *work);
void packet_consume_data(struct sk_buff *skb, struct wireguard_device *wg);
+void packet_purge_init_queue(struct wireguard_peer *peer);
+
+/* receive.c */
+void packet_process_queued_handshake_packets(struct work_struct *work);
+void packet_consume_data_done(struct sk_buff *skb, struct wireguard_peer *peer, struct endpoint *endpoint, bool used_new_key);
+void packet_receive(struct wireguard_device *wg, struct sk_buff *skb);

/* send.c */
-void keep_key_fresh_send(struct wireguard_peer *peer);
-void packet_send_keepalive(struct wireguard_peer *peer);
-void packet_queue_handshake_initiation(struct wireguard_peer *peer, bool is_retry);
void packet_send_queued_handshakes(struct work_struct *work);
+void packet_queue_handshake_initiation(struct wireguard_peer *peer, bool is_retry);
void packet_send_handshake_response(struct wireguard_peer *peer);
void packet_send_handshake_cookie(struct wireguard_device *wg, struct sk_buff *initiating_skb, __le32 sender_index);
-void packet_send_worker(struct work_struct *work);
-void packet_encrypt_worker(struct work_struct *work);
-void packet_init_worker(struct work_struct *work);
-void packet_create_data(struct wireguard_peer *peer, struct sk_buff_head *packets);
-
-/* data.c */
-int init_crypt_cache(void);
-void deinit_crypt_cache(void);
-void peer_purge_queues(struct wireguard_peer *peer);
+void packet_send_keepalive(struct wireguard_peer *peer);
+void packet_create_data_done(struct sk_buff_head *queue, struct wireguard_peer *peer);

/* Returns either the correct skb->protocol value, or 0 if invalid. */
static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
@@ -57,6 +57,21 @@ static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
return 0;
}

+/* This function is racy, in the sense that next is unlocked, so it could return
+ * the same CPU twice. A race-free version of this would be to instead store an
+ * atomic sequence number, do an increment-and-return, and then iterate through
+ * every possible CPU until we get to that index -- choose_cpu. However that's
+ * a bit slower, and it doesn't seem like this potential race actually introduces
+ * any performance loss, so we live with it. */
+static inline int cpumask_next_online(int *next)
+{
+	int cpu = *next;
+	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
+		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+	return cpu;
+}
+
#ifdef DEBUG
bool packet_counter_selftest(void);
#endif
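
The skb_examine_untrusted_ip_hdr() helper is only partially visible in these
hunks; all the diff shows is its contract ("the correct skb->protocol value,
or 0 if invalid"). As a hedged, self-contained sketch of one way such a check
can work -- classifying an untrusted buffer by its IP version nibble and
minimum header length -- consider the plain C below. The name
examine_ip_version(), the buffer-based interface, and the hard-coded EtherType
values are illustrative assumptions, not the in-tree implementation.

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

/* Hypothetical sketch, not the in-tree helper: return the EtherType of an
 * untrusted IP packet in network byte order, or 0 if it looks invalid. */
static uint16_t examine_ip_version(const uint8_t *data, size_t len)
{
	if (len < 1)
		return 0;
	switch (data[0] >> 4) {	/* IP version nibble */
	case 4:	/* struct iphdr is at least 20 bytes */
		return len >= 20 ? htons(0x0800 /* ETH_P_IP */) : 0;
	case 6:	/* struct ipv6hdr is exactly 40 bytes */
		return len >= 40 ? htons(0x86DD /* ETH_P_IPV6 */) : 0;
	default:
		return 0;
	}
}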
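
The comment on cpumask_next_online() describes a race-free alternative: store
an atomic sequence number, do an increment-and-return, and then walk the
online CPUs up to that index. A hedged kernel-style sketch of that scheme
follows; the name choose_cpu_atomic() and the cpumask_first() fallback are
assumptions for illustration, not code from this commit.

#include <linux/atomic.h>
#include <linux/cpumask.h>

/* Hypothetical sketch of the "choose_cpu" alternative: derive an index from
 * an atomic counter and walk the online mask to the CPU at that index. This
 * avoids the unlocked-next race, but iterates the mask on every call, which
 * is the slowdown the comment above alludes to. */
static inline int choose_cpu_atomic(atomic_t *counter)
{
	unsigned int index = (unsigned int)atomic_inc_return(counter) %
			     cpumask_weight(cpu_online_mask);
	int cpu;

	for_each_online_cpu(cpu) {
		if (!index--)
			return cpu;
	}
	/* Only reachable if the online mask shrank while we walked it. */
	return cpumask_first(cpu_online_mask);
}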