-rw-r--r--  src/device.c  6
-rw-r--r--  src/device.h  8
-rw-r--r--  src/send.c    8
3 files changed, 13 insertions, 9 deletions
diff --git a/src/device.c b/src/device.c
index 532d399..e888ac3 100644
--- a/src/device.c
+++ b/src/device.c
@@ -171,8 +171,8 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev_kfree_skb(skb);
 		skb = segs;
 	}
-	do {
-		next = skb->next;
+
+	skb_list_walk_safe(skb, skb, next) {
 		skb_mark_not_on_list(skb);
 
 		skb = skb_share_check(skb, GFP_ATOMIC);
@@ -187,7 +187,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		PACKET_CB(skb)->mtu = mtu;
 		__skb_queue_tail(&packets, skb);
-	} while ((skb = next) != NULL);
+	}
 
 	spin_lock_bh(&peer->staged_packet_queue.lock);
 	/* If the queue is getting too big, we start removing the oldest packets
diff --git a/src/device.h b/src/device.h
index b15a8be..c91f305 100644
--- a/src/device.h
+++ b/src/device.h
@@ -62,4 +62,12 @@ struct wg_device {
 int wg_device_init(void);
 void wg_device_uninit(void);
 
+/* Later after the dust settles, this can be moved into include/linux/skbuff.h,
+ * where virtually all code that deals with GSO segs can benefit, around ~30
+ * drivers as of writing.
+ */
+#define skb_list_walk_safe(first, skb, next)                                   \
+	for (skb = first, next = skb->next; skb;                               \
+	     skb = next, next = skb ? skb->next : NULL)
+
 #endif /* _WG_DEVICE_H */
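For context on the new helper: it caches the successor pointer before each pass through the loop body, so the body may unlink or even free the current skb without breaking the traversal. Below is a minimal userspace sketch of that property; the struct is a stand-in with only the fields needed for the walk, not the kernel's sk_buff, and the program is illustrative rather than part of this change.

/* Userspace illustration only: a stub with a ->next field stands in for the
 * kernel's struct sk_buff, just to show why the body may free the current
 * entry without breaking the walk.
 */
#include <stdio.h>
#include <stdlib.h>

struct sk_buff {
	struct sk_buff *next;
	int len;
};

#define skb_list_walk_safe(first, skb, next)                                   \
	for (skb = first, next = skb->next; skb;                               \
	     skb = next, next = skb ? skb->next : NULL)

int main(void)
{
	struct sk_buff *head = NULL, **tail = &head, *skb, *next;
	int i;

	/* Build a short ->next-linked list, analogous to a GSO segment chain. */
	for (i = 0; i < 4; ++i) {
		skb = calloc(1, sizeof(*skb));
		if (!skb)
			return 1;
		skb->len = 100 + i;
		*tail = skb;
		tail = &skb->next;
	}

	/* The successor is saved before each pass through the body, so the
	 * body is free to unlink or free the entry it is looking at.
	 */
	skb_list_walk_safe(head, skb, next) {
		printf("segment len=%d\n", skb->len);
		skb->next = NULL; /* roughly what skb_mark_not_on_list() does */
		free(skb);
	}
	return 0;
}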
diff --git a/src/send.c b/src/send.c
index 3da27d6..9b54f4a 100644
--- a/src/send.c
+++ b/src/send.c
@@ -233,10 +233,6 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
 	wg_packet_send_staged_packets(peer);
 }
 
-#define skb_walk_null_queue_safe(first, skb, next)                             \
-	for (skb = first, next = skb->next; skb;                               \
-	     skb = next, next = skb ? skb->next : NULL)
-
 static void wg_packet_create_data_done(struct sk_buff *first,
 				       struct wg_peer *peer)
 {
@@ -245,7 +241,7 @@ static void wg_packet_create_data_done(struct sk_buff *first,
 
 	wg_timers_any_authenticated_packet_traversal(peer);
 	wg_timers_any_authenticated_packet_sent(peer);
-	skb_walk_null_queue_safe(first, skb, next) {
+	skb_list_walk_safe(first, skb, next) {
 		is_keepalive = skb->len == message_data_len(0);
 		if (likely(!wg_socket_send_skb_to_peer(peer, skb,
 				PACKET_CB(skb)->ds) && !is_keepalive))
@@ -295,7 +291,7 @@ void wg_packet_encrypt_worker(struct work_struct *work)
 	while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
 		enum packet_state state = PACKET_STATE_CRYPTED;
 
-		skb_walk_null_queue_safe(first, skb, next) {
+		skb_list_walk_safe(first, skb, next) {
 			if (likely(encrypt_packet(skb,
 						  PACKET_CB(first)->keypair,
 						  &simd_context))) {
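As a sanity check on the refactor itself: the open-coded do/while removed from wg_xmit() and the skb_walk_null_queue_safe() macro removed from send.c traverse a list exactly as skb_list_walk_safe() does, so the commit is a rename and hoist with no behavioral change. The sketch below makes that concrete with the same userspace stand-ins as above; build_list() is a hypothetical helper for the example only.

/* Userspace illustration only: compare the removed open-coded loop with the
 * new macro over the same kind of stand-in list; both visit every node once,
 * in order, while freeing as they go.
 */
#include <assert.h>
#include <stdlib.h>

struct sk_buff {
	struct sk_buff *next;
	int id;
};

#define skb_list_walk_safe(first, skb, next)                                   \
	for (skb = first, next = skb->next; skb;                               \
	     skb = next, next = skb ? skb->next : NULL)

/* Hypothetical helper for the example: build an n-node ->next-linked list. */
static struct sk_buff *build_list(int n)
{
	struct sk_buff *head = NULL, **tail = &head;
	int i;

	for (i = 0; i < n; ++i) {
		struct sk_buff *skb = calloc(1, sizeof(*skb));

		if (!skb)
			exit(1);
		skb->id = i;
		*tail = skb;
		tail = &skb->next;
	}
	return head;
}

int main(void)
{
	struct sk_buff *skb, *next;
	int seen = 0;

	/* Old style, as removed from wg_xmit(): cache the successor by hand. */
	skb = build_list(5);
	do {
		next = skb->next;
		assert(skb->id == seen);
		seen++;
		free(skb);
	} while ((skb = next) != NULL);
	assert(seen == 5);

	/* New style: the same traversal, bookkeeping hidden in the macro. */
	seen = 0;
	skb_list_walk_safe(build_list(5), skb, next) {
		assert(skb->id == seen);
		seen++;
		free(skb);
	}
	assert(seen == 5);
	return 0;
}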