author    Jason A. Donenfeld <Jason@zx2c4.com>	2018-06-13 03:40:37 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>	2018-06-13 15:05:46 +0200
commit    58b47c6907be93ea458f52b5c3dd2edc55f6e495
tree      bb6cb9e851b66afe58fa04b43d68299ea0b35dba
parent    tools: support getentropy(3)
skb_reset: do not free socket memory with preemption disabled
It turns out that calling skb_orphan (via skb_scrub_packet(xnet=true)) might result in a call to schedule(), since it will wake up waiting socket writers in userspace. This means that everything explodes, since we're calling this with preemption disabled.

Work around this by removing the spinlocks in the consumers -- they're the sole consumers and so don't need the spinlocks anyway -- and then move skb_orphan to the consumption thread, which doesn't have preemption disabled. (When we move to a proper lockless mpmc ring buffer, we won't even have spinlocks anyway.)

Since we only actually need to call skb_orphan on tx -- so that net_cls does the right thing (see net/netfilter/xt_cgroup.c and its check of skb->sk) -- just explicitly orphan the packet in the one place where that's actually necessary, and otherwise don't orphan it. This leaves us without a call to skb_orphan on the rx path, where we don't need it and can't use it anyway, because preemption is disabled for netif_receive_skb.

This effectively reworks 740319127f14793a13ad385e8150cd98c715c20c.
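For illustration only, here is a minimal sketch of the single-consumer drain pattern described above, under the assumption that each worker is the sole consumer of its ring. The example_tx_drain() and send_one() names are hypothetical; struct crypt_queue, PACKET_CB, PACKET_STATE_UNCRYPTED, the __ptr_ring_* helpers and skb_orphan come from the patch or the upstream kernel. It is a sketch of the idea, not the driver's actual workers.

/* Illustrative sketch, not part of this patch: the single-consumer
 * drain pattern the change relies on. Assumes the driver's queueing.h
 * for struct crypt_queue, PACKET_CB() and PACKET_STATE_UNCRYPTED;
 * example_tx_drain() and send_one() are hypothetical names.
 */
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>

/* Hypothetical transmit hook; the real worker calls socket_send_skb_to_peer(). */
static void send_one(struct sk_buff *skb);

static void example_tx_drain(struct crypt_queue *queue)
{
	struct sk_buff *skb;

	/* This worker is the ring's only consumer, so the unlocked
	 * __ptr_ring_* helpers are safe without taking consumer_lock. */
	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL) {
		if (atomic_read(&PACKET_CB(skb)->state) == PACKET_STATE_UNCRYPTED)
			break;
		__ptr_ring_discard_one(&queue->ring);

		/* Orphan only here, on tx: preemption is enabled in this
		 * worker, so it's safe for skb_orphan() to wake socket
		 * writers, and xt_cgroup's skb->sk check behaves correctly. */
		skb_orphan(skb);
		send_one(skb);
	}
}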
-rw-r--r--	src/queueing.c	2
-rw-r--r--	src/queueing.h	3
-rw-r--r--	src/receive.c	2
-rw-r--r--	src/send.c	3
4 files changed, 4 insertions, 6 deletions
diff --git a/src/queueing.c b/src/queueing.c
index 85dea6b..f33395e 100644
--- a/src/queueing.c
+++ b/src/queueing.c
@@ -41,6 +41,6 @@ void packet_queue_free(struct crypt_queue *queue, bool multicore)
 {
 	if (multicore)
 		free_percpu(queue->worker);
-	WARN_ON(!ptr_ring_empty_bh(&queue->ring));
+	WARN_ON(!__ptr_ring_empty(&queue->ring));
 	ptr_ring_cleanup(&queue->ring, NULL);
 }
diff --git a/src/queueing.h b/src/queueing.h
index 0057cfa..af60a31 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -65,7 +65,7 @@ static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
 static inline void skb_reset(struct sk_buff *skb)
 {
 	const int pfmemalloc = skb->pfmemalloc;
-	skb_scrub_packet(skb, true);
+	skb_scrub_packet(skb, false);
 	memset(&skb->headers_start, 0, offsetof(struct sk_buff, headers_end) - offsetof(struct sk_buff, headers_start));
 	skb->pfmemalloc = pfmemalloc;
 	skb->queue_mapping = 0;
@@ -77,6 +77,7 @@ static inline void skb_reset(struct sk_buff *skb)
 	skb->tc_index = 0;
 	skb_reset_tc(skb);
 #endif
+	ipvs_reset(skb);
 	skb->hdr_len = skb_headroom(skb);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
diff --git a/src/receive.c b/src/receive.c
index 27d3d04..99d6b19 100644
--- a/src/receive.c
+++ b/src/receive.c
@@ -378,7 +378,6 @@ void packet_rx_worker(struct work_struct *work)
 	bool free;
 
 	local_bh_disable();
-	spin_lock_bh(&queue->ring.consumer_lock);
 	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read(&PACKET_CB(skb)->state)) != PACKET_STATE_UNCRYPTED) {
 		__ptr_ring_discard_one(&queue->ring);
 		peer = PACKET_PEER(skb);
@@ -406,7 +405,6 @@ next:
 		if (unlikely(free))
 			dev_kfree_skb(skb);
 	}
-	spin_unlock_bh(&queue->ring.consumer_lock);
 	local_bh_enable();
 }
 
diff --git a/src/send.c b/src/send.c
index 6e04ad4..e96ad52 100644
--- a/src/send.c
+++ b/src/send.c
@@ -204,6 +204,7 @@ static void packet_create_data_done(struct sk_buff *first, struct wireguard_peer
 	timers_any_authenticated_packet_traversal(peer);
 	timers_any_authenticated_packet_sent(peer);
 	skb_walk_null_queue_safe(first, skb, next) {
+		skb_orphan(skb);
 		is_keepalive = skb->len == message_data_len(0);
 		if (likely(!socket_send_skb_to_peer(peer, skb, PACKET_CB(skb)->ds) && !is_keepalive))
 			data_sent = true;
@@ -223,7 +224,6 @@ void packet_tx_worker(struct work_struct *work)
 	struct sk_buff *first;
 	enum packet_state state;
 
-	spin_lock_bh(&queue->ring.consumer_lock);
 	while ((first = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read(&PACKET_CB(first)->state)) != PACKET_STATE_UNCRYPTED) {
 		__ptr_ring_discard_one(&queue->ring);
 		peer = PACKET_PEER(first);
@@ -237,7 +237,6 @@ void packet_tx_worker(struct work_struct *work)
 		noise_keypair_put(keypair);
 		peer_put(peer);
 	}
-	spin_unlock_bh(&queue->ring.consumer_lock);
 }
 
 void packet_encrypt_worker(struct work_struct *work)