From 1e674d8af37b3f2ba4cb233fbb335fb701d185ad Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Thu, 25 Oct 2018 16:37:38 +0200 Subject: send: consider dropped staged packets to be dropped Suggested-by: Andrew Lunn --- src/device.c | 6 ++++-- src/peer.c | 2 +- src/queueing.h | 1 + src/send.c | 8 ++++++++ src/timers.c | 2 +- 5 files changed, 15 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/device.c b/src/device.c index 3a14e33..f47ed0f 100644 --- a/src/device.c +++ b/src/device.c @@ -102,7 +102,7 @@ static int wg_stop(struct net_device *dev) mutex_lock(&wg->device_update_lock); list_for_each_entry(peer, &wg->peer_list, peer_list) { - skb_queue_purge(&peer->staged_packet_queue); + wg_packet_purge_staged_packets(peer); wg_timers_stop(peer); wg_noise_handshake_clear(&peer->handshake); wg_noise_keypairs_clear(&peer->keypairs); @@ -190,8 +190,10 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) * until it's small again. We do this before adding the new packet, so * we don't remove GSO segments that are in excess. */ - while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) + while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); + ++dev->stats.tx_dropped; + } skb_queue_splice_tail(&packets, &peer->staged_packet_queue); spin_unlock_bh(&peer->staged_packet_queue.lock); diff --git a/src/peer.c b/src/peer.c index 4a4ec66..e4e2405 100644 --- a/src/peer.c +++ b/src/peer.c @@ -192,7 +192,7 @@ static void kref_release(struct kref *refcount) /* Remove any lingering packets that didn't have a chance to be * transmitted. */ - skb_queue_purge(&peer->staged_packet_queue); + wg_packet_purge_staged_packets(peer); /* Free the memory used. 
*/ call_rcu_bh(&peer->rcu, rcu_release); diff --git a/src/queueing.h b/src/queueing.h index ce522d8..60688bc 100644 --- a/src/queueing.h +++ b/src/queueing.h @@ -41,6 +41,7 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg, struct sk_buff *initiating_skb, __le32 sender_index); void wg_packet_send_keepalive(struct wg_peer *peer); +void wg_packet_purge_staged_packets(struct wg_peer *peer); void wg_packet_send_staged_packets(struct wg_peer *peer); /* Workqueue workers: */ void wg_packet_handshake_send_worker(struct work_struct *work); diff --git a/src/send.c b/src/send.c index 1e97aa1..06996f0 100644 --- a/src/send.c +++ b/src/send.c @@ -347,6 +347,14 @@ err: skb_free_null_queue(first); } +void wg_packet_purge_staged_packets(struct wg_peer *peer) +{ + spin_lock_bh(&peer->staged_packet_queue.lock); + peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen; + __skb_queue_purge(&peer->staged_packet_queue); + spin_unlock_bh(&peer->staged_packet_queue.lock); +} + void wg_packet_send_staged_packets(struct wg_peer *peer) { struct noise_symmetric_key *key; diff --git a/src/timers.c b/src/timers.c index 22eb1ee..91ffbbc 100644 --- a/src/timers.c +++ b/src/timers.c @@ -71,7 +71,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer) /* We drop all packets without a keypair and don't try again, * if we try unsuccessfully for too long to make a handshake. */ - skb_queue_purge(&peer->staged_packet_queue); + wg_packet_purge_staged_packets(peer); /* We set a timer for destroying any residue that might be left * of a partial exchange. -- cgit v1.2.3-59-g8ed1b