author     Jason A. Donenfeld <Jason@zx2c4.com>   2016-12-11 13:51:34 +0100
committer  Jason A. Donenfeld <Jason@zx2c4.com>   2016-12-11 13:59:39 +0100
commit     5c329a7861562511003657ccf1ed95738508139e (patch)
tree       bb1e342c2c838678b1bf0cd222a564d96112e7da
parent     main: consistent lines (diff)
device: clean up xmit error path
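
The diff below replaces the repeated skb_unsendable()/peer_put()/return sequences in xmit() with a single unwind reached via goto: each failure sets ret and jumps either to err_peer (which drops the peer reference) or to err (which marks the skb unsendable), while the success path returns NETDEV_TX_OK directly. The following is a minimal, self-contained sketch of that unwind idiom; every name in it is a stand-in chosen for illustration, not a kernel or WireGuard symbol.

/* Minimal, self-contained illustration of the unwind pattern this patch
 * introduces in xmit(): each failure sets ret and jumps to one of two
 * labels instead of repeating the cleanup inline.  Every name below is a
 * stand-in for illustration; none of this is the kernel or WireGuard API. */
#include <stdio.h>
#include <stdlib.h>

struct resource {
	int id;
};

static struct resource *acquire(void)
{
	return malloc(sizeof(struct resource));   /* analogue of the peer lookup */
}

static void release(struct resource *r)
{
	free(r);                                  /* analogue of peer_put() */
}

static void discard(void)
{
	puts("dropping packet");                  /* analogue of skb_unsendable() */
}

static int do_transmit(int payload)
{
	struct resource *peer;
	int ret;

	peer = acquire();
	if (!peer) {
		ret = -1;        /* e.g. -ENOKEY in the real code */
		goto err;        /* nothing acquired yet, so skip the release */
	}

	if (payload < 0) {
		ret = -2;        /* e.g. -EHOSTUNREACH */
		goto err_peer;   /* must drop the reference before bailing out */
	}

	printf("sent %d\n", payload);
	release(peer);
	return 0;                /* success path, like returning NETDEV_TX_OK */

err_peer:
	release(peer);
err:
	discard();               /* the fallthrough drops the packet exactly once */
	return ret;
}

int main(void)
{
	do_transmit(42);
	do_transmit(-1);
	return 0;
}

The device.c hunks below follow the same shape: err_peer drops the peer reference and falls through to err, which marks the skb unsendable, so every failure is cleaned up in exactly one place.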
Diffstat
-rw-r--r--  src/device.c    25
-rw-r--r--  src/packets.h    2
-rw-r--r--  src/send.c       5
3 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/src/device.c b/src/device.c
index 82eb490..bd58eb8 100644
--- a/src/device.c
+++ b/src/device.c
@@ -137,26 +137,26 @@ static netdev_tx_t xmit(struct sk_buff *skb, struct net_device *dev)
int ret;
if (unlikely(dev_recursion_level() > 4)) {
+ ret = -ELOOP;
net_dbg_ratelimited("Routing loop detected\n");
skb_unsendable(skb, dev);
- return -ELOOP;
+ goto err;
}
peer = routing_table_lookup_dst(&wg->peer_routing_table, skb);
if (unlikely(!peer)) {
+ ret = -ENOKEY;
net_dbg_skb_ratelimited("No peer is configured for %pISc\n", skb);
- skb_unsendable(skb, dev);
- return -ENOKEY;
+ goto err;
}
read_lock_bh(&peer->endpoint_lock);
ret = peer->endpoint.addr_storage.ss_family != AF_INET && peer->endpoint.addr_storage.ss_family != AF_INET6;
read_unlock_bh(&peer->endpoint_lock);
if (unlikely(ret)) {
+ ret = -EHOSTUNREACH;
net_dbg_ratelimited("No valid endpoint has been configured or discovered for peer %Lu\n", peer->internal_id);
- skb_unsendable(skb, dev);
- peer_put(peer);
- return -EHOSTUNREACH;
+ goto err_peer;
}
/* If the queue is getting too big, we start removing the oldest packets until it's small again.
@@ -169,9 +169,8 @@ static netdev_tx_t xmit(struct sk_buff *skb, struct net_device *dev)
else {
struct sk_buff *segs = skb_gso_segment(skb, 0);
if (unlikely(IS_ERR(segs))) {
- skb_unsendable(skb, dev);
- peer_put(peer);
- return PTR_ERR(segs);
+ ret = PTR_ERR(segs);
+ goto err_peer;
}
dev_kfree_skb(skb);
skb = segs;
@@ -192,8 +191,14 @@ static netdev_tx_t xmit(struct sk_buff *skb, struct net_device *dev)
skb = next;
}
- ret = packet_send_queue(peer);
+ packet_send_queue(peer);
+ peer_put(peer);
+ return NETDEV_TX_OK;
+
+err_peer:
peer_put(peer);
+err:
+ skb_unsendable(skb, dev);
return ret;
}
diff --git a/src/packets.h b/src/packets.h
index fbb81df..e36c12c 100644
--- a/src/packets.h
+++ b/src/packets.h
@@ -19,7 +19,7 @@ void packet_receive(struct wireguard_device *wg, struct sk_buff *skb);
void packet_process_queued_handshake_packets(struct work_struct *work);
/* send.c */
-int packet_send_queue(struct wireguard_peer *peer);
+void packet_send_queue(struct wireguard_peer *peer);
void packet_send_keepalive(struct wireguard_peer *peer);
void packet_queue_handshake_initiation(struct wireguard_peer *peer);
void packet_send_queued_handshakes(struct work_struct *work);
diff --git a/src/send.c b/src/send.c
index 5f3f165..6485d34 100644
--- a/src/send.c
+++ b/src/send.c
@@ -138,7 +138,7 @@ static void message_create_data_done(struct sk_buff_head *queue, struct wireguar
packet_send_queue(peer);
}
-int packet_send_queue(struct wireguard_peer *peer)
+void packet_send_queue(struct wireguard_peer *peer)
{
struct sk_buff_head queue;
unsigned long flags;
@@ -152,7 +152,7 @@ int packet_send_queue(struct wireguard_peer *peer)
spin_unlock_irqrestore(&peer->tx_packet_queue.lock, flags);
if (unlikely(!skb_queue_len(&queue)))
- return NETDEV_TX_OK;
+ return;
/* We submit it for encryption and sending. */
switch (packet_create_data(&queue, peer, message_create_data_done)) {
@@ -189,5 +189,4 @@ int packet_send_queue(struct wireguard_peer *peer)
* a reference to the local queue. */
__skb_queue_purge(&queue);
}
- return NETDEV_TX_OK;
}
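
With xmit() reporting its own status through the unwind above, no caller consumes packet_send_queue()'s return value any more, so the packets.h and send.c hunks change it to return void and drop the NETDEV_TX_OK returns from its body. A minimal, self-contained sketch of the resulting contract, again with stand-in names rather than the actual kernel or WireGuard symbols:

/* Sketch of the new calling convention (stand-in names only): the
 * queue-flushing helper returns nothing, so the transmit function
 * reports success on its own. */
#include <stdio.h>

static void send_queue(void)
{
	/* analogue of the now-void packet_send_queue(): hand the queued
	 * packets off for encryption and transmission, report nothing */
	puts("queue handed off");
}

static int transmit(void)
{
	send_queue();   /* no return value to forward any more */
	return 0;       /* the real xmit() returns NETDEV_TX_OK here */
}

int main(void)
{
	return transmit();
}

This is also why the success path in xmit() above now returns NETDEV_TX_OK itself instead of forwarding whatever packet_send_queue() used to return.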