| | |
|---|---|
| author | 2024-08-02 14:10:55 +0200 |
| committer | 2024-08-02 14:10:55 +0200 |
| commit | 4436e6da008fee87d54c038e983e5be9a6baf8fb (patch) |
| tree | 265a15efcf6f17e0e32e258d66b274fc5cad41d4 /net/unix/garbage.c |
| parent | x86/mm: Cleanup prctl_enable_tagged_addr() nr_bits error checking (diff) |
| parent | Linux 6.11-rc1 (diff) |
| download | wireguard-linux-4436e6da008fee87d54c038e983e5be9a6baf8fb.tar.xz wireguard-linux-4436e6da008fee87d54c038e983e5be9a6baf8fb.zip |
Merge branch 'linus' into x86/mm
Bring x86 and selftests up to date
Diffstat (limited to 'net/unix/garbage.c')
| -rw-r--r-- | net/unix/garbage.c | 17 |
1 file changed, 6 insertions, 11 deletions
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index dfe94a90ece4..06d94ad999e9 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -337,11 +337,6 @@ static bool unix_vertex_dead(struct unix_vertex *vertex)
 	return true;
 }
 
-enum unix_recv_queue_lock_class {
-	U_RECVQ_LOCK_NORMAL,
-	U_RECVQ_LOCK_EMBRYO,
-};
-
 static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist)
 {
 	skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist);
@@ -375,8 +370,7 @@ static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist
 		skb_queue_walk(queue, skb) {
 			struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
 
-			/* listener -> embryo order, the inversion never happens. */
-			spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO);
+			spin_lock(&embryo_queue->lock);
 			unix_collect_queue(unix_sk(skb->sk), hitlist);
 			spin_unlock(&embryo_queue->lock);
 		}
@@ -476,6 +470,7 @@ prev_vertex:
 	}
 
 	if (vertex->index == vertex->scc_index) {
+		struct unix_vertex *v;
 		struct list_head scc;
 		bool scc_dead = true;
 
@@ -486,15 +481,15 @@ prev_vertex:
 		 */
 		__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
 
-		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
+		list_for_each_entry_reverse(v, &scc, scc_entry) {
 			/* Don't restart DFS from this vertex in unix_walk_scc(). */
-			list_move_tail(&vertex->entry, &unix_visited_vertices);
+			list_move_tail(&v->entry, &unix_visited_vertices);
 
 			/* Mark vertex as off-stack. */
-			vertex->index = unix_vertex_grouped_index;
+			v->index = unix_vertex_grouped_index;
 
 			if (scc_dead)
-				scc_dead = unix_vertex_dead(vertex);
+				scc_dead = unix_vertex_dead(v);
 		}
 
 		if (scc_dead)
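For context on the annotation that the first hunk removes: spin_lock_nested() exists so lockdep can validate code that intentionally holds two locks of the same lock class at once, which is what the U_RECVQ_LOCK_EMBRYO subclass expressed for the listener and embryo receive-queue locks. The sketch below is a minimal, hypothetical illustration of that general pattern, not code from this patch; demo_node, demo_sum() and the DEMO_LOCK_* subclass values are invented for the example.

```c
/* Minimal sketch of the lockdep subclass pattern, assuming a parent->child
 * structure where both locks belong to the same lock class. All names here
 * are hypothetical and only illustrate spin_lock_nested() usage.
 */
#include <linux/spinlock.h>

struct demo_node {
	spinlock_t lock;		/* every node's lock shares one lock class */
	struct demo_node *child;
	int val;
};

/* Hypothetical subclass values, analogous to the removed enum. */
enum demo_lock_subclass {
	DEMO_LOCK_PARENT,	/* 0: default subclass */
	DEMO_LOCK_CHILD,	/* 1: nested acquisition */
};

/* Caller guarantees parent->child is valid and that locks are always taken
 * in parent -> child order, so the nesting cannot invert.
 */
static int demo_sum(struct demo_node *parent)
{
	int sum;

	spin_lock(&parent->lock);	/* implicit subclass 0 */

	/* Same lock class held twice: tell lockdep the nesting is intended
	 * so it does not report a false self-deadlock.
	 */
	spin_lock_nested(&parent->child->lock, DEMO_LOCK_CHILD);
	sum = parent->val + parent->child->val;
	spin_unlock(&parent->child->lock);

	spin_unlock(&parent->lock);
	return sum;
}
```

Dropping the subclass and using plain spin_lock(), as the hunk above does, reads as the ordinary single-lock case; the removal of the "listener -> embryo order" comment suggests the collection path no longer needs the nested annotation there, though the merged commit itself does not restate the rationale.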
