aboutsummaryrefslogtreecommitdiffstatshomepage
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2018-10-25 16:37:38 +0200
committerJason A. Donenfeld <Jason@zx2c4.com>2018-10-27 03:11:31 +0200
commit1e674d8af37b3f2ba4cb233fbb335fb701d185ad (patch)
tree0c1fe2d097ea58ac70fd506c7ce9be94a25e86d6
parentallowedips: fix up macros and annotations (diff)
downloadwireguard-monolithic-historical-1e674d8af37b3f2ba4cb233fbb335fb701d185ad.tar.xz
wireguard-monolithic-historical-1e674d8af37b3f2ba4cb233fbb335fb701d185ad.zip
send: consider dropped staged packets to be dropped
Suggested-by: Andrew Lunn <andrew@lunn.ch>
-rw-r--r--src/device.c6
-rw-r--r--src/peer.c2
-rw-r--r--src/queueing.h1
-rw-r--r--src/send.c8
-rw-r--r--src/timers.c2
5 files changed, 15 insertions, 4 deletions
diff --git a/src/device.c b/src/device.c
index 3a14e33..f47ed0f 100644
--- a/src/device.c
+++ b/src/device.c
@@ -102,7 +102,7 @@ static int wg_stop(struct net_device *dev)
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
- skb_queue_purge(&peer->staged_packet_queue);
+ wg_packet_purge_staged_packets(peer);
wg_timers_stop(peer);
wg_noise_handshake_clear(&peer->handshake);
wg_noise_keypairs_clear(&peer->keypairs);
@@ -190,8 +190,10 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
* until it's small again. We do this before adding the new packet, so
* we don't remove GSO segments that are in excess.
*/
- while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS)
+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+ ++dev->stats.tx_dropped;
+ }
skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
diff --git a/src/peer.c b/src/peer.c
index 4a4ec66..e4e2405 100644
--- a/src/peer.c
+++ b/src/peer.c
@@ -192,7 +192,7 @@ static void kref_release(struct kref *refcount)
/* Remove any lingering packets that didn't have a chance to be
* transmitted.
*/
- skb_queue_purge(&peer->staged_packet_queue);
+ wg_packet_purge_staged_packets(peer);
/* Free the memory used. */
call_rcu_bh(&peer->rcu, rcu_release);
diff --git a/src/queueing.h b/src/queueing.h
index ce522d8..60688bc 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -41,6 +41,7 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg,
struct sk_buff *initiating_skb,
__le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
+void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
diff --git a/src/send.c b/src/send.c
index 1e97aa1..06996f0 100644
--- a/src/send.c
+++ b/src/send.c
@@ -347,6 +347,14 @@ err:
skb_free_null_queue(first);
}
+void wg_packet_purge_staged_packets(struct wg_peer *peer)
+{
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+ __skb_queue_purge(&peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+}
+
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
struct noise_symmetric_key *key;
diff --git a/src/timers.c b/src/timers.c
index 22eb1ee..91ffbbc 100644
--- a/src/timers.c
+++ b/src/timers.c
@@ -71,7 +71,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
/* We drop all packets without a keypair and don't try again,
* if we try unsuccessfully for too long to make a handshake.
*/
- skb_queue_purge(&peer->staged_packet_queue);
+ wg_packet_purge_staged_packets(peer);
/* We set a timer for destroying any residue that might be left
* of a partial exchange.