author      Eric Dumazet <edumazet@google.com>      2015-10-02 11:43:24 -0700
committer   David S. Miller <davem@davemloft.net>   2015-10-03 04:32:36 -0700
commit      aac065c50aba0c534a929aeb687eb68c58e523b8 (patch)
tree        b8817f56e96322e7c855653fe63eced1256c8356 /net
parent      tcp: add a spinlock to protect struct request_sock_queue (diff)
tcp: move qlen/young out of struct listen_sock
qlen_inc & young_inc were protected by the listener lock, while qlen_dec
& young_dec were atomic fields.

Everything needs to be atomic for the upcoming lockless listener.

Also move qlen/young into request_sock_queue, as we'll get rid of
struct listen_sock eventually.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
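As a rough illustration of the new layout, here is a minimal sketch assuming the
matching header-side change in include/net/request_sock.h (not visible below,
since the diffstat is limited to 'net'). The real struct has more members, and
field order plus the reqsk_queue_added() helper are reconstructed for
illustration only:

#include <linux/atomic.h>
#include <linux/spinlock.h>

/* Sketch only: qlen and young become plain atomic_t counters living in
 * request_sock_queue, so both the increment and the decrement side can
 * run without holding the listener lock.
 */
struct request_sock_queue {
	spinlock_t		syn_wait_lock;
	atomic_t		qlen;	/* pending, not yet accepted requests */
	atomic_t		young;	/* requests that never needed a SYNACK retransmit */
	struct listen_sock	*listen_opt;
};

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

These are the accessors the hunks below switch to in place of
listen_sock_qlen()/listen_sock_young().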
Diffstat (limited to 'net')
-rw-r--r--   net/core/request_sock.c           8
-rw-r--r--   net/ipv4/inet_connection_sock.c   6
-rw-r--r--   net/ipv4/inet_diag.c              2
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 8d9fd31d3d06..5ca624cea04c 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -102,7 +102,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
/* make all the listen_opt local to us */
struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
- if (listen_sock_qlen(lopt) != 0) {
+ if (reqsk_queue_len(queue) != 0) {
unsigned int i;
for (i = 0; i < lopt->nr_table_entries; i++) {
@@ -116,7 +116,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
* or risk a dead lock.
*/
spin_unlock_bh(&queue->syn_wait_lock);
- atomic_inc(&lopt->qlen_dec);
+ atomic_dec(&queue->qlen);
if (del_timer_sync(&req->rsk_timer))
reqsk_put(req);
reqsk_put(req);
@@ -126,8 +126,8 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
}
}
- if (WARN_ON(listen_sock_qlen(lopt) != 0))
- pr_err("qlen %u\n", listen_sock_qlen(lopt));
+ if (WARN_ON(reqsk_queue_len(queue) != 0))
+ pr_err("qlen %u\n", reqsk_queue_len(queue));
kvfree(lopt);
}
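For comparison, a reconstructed sketch of the pre-patch scheme this hunk
replaces; the layout and naming of the old struct listen_sock fields are an
assumption here (prefixed old_ to make that explicit), but they explain why
removing a request used to be spelled atomic_inc(&lopt->qlen_dec) and is now a
plain atomic_dec(&queue->qlen):

#include <linux/atomic.h>

/* Old scheme (sketch): qlen_inc/young_inc were bumped under the listener
 * lock, while timer and destroy paths could only touch the *_dec side
 * atomically.  The effective backlog length was the difference of the
 * two counters.
 */
struct old_listen_sock {
	int		qlen_inc;	/* written under the listener lock */
	int		young_inc;	/* written under the listener lock */
	atomic_t	qlen_dec;	/* updated from timer context */
	atomic_t	young_dec;
};

static inline int old_listen_sock_qlen(const struct old_listen_sock *lopt)
{
	return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
}

With a single atomic qlen, both sides of the bookkeeping hit the same counter,
which is what the planned lockless listener needs.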
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 0085612b9e49..093ef04e6ebf 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -640,9 +640,9 @@ static void reqsk_timer_handler(unsigned long data)
* embrions; and abort old ones without pity, if old
* ones are about to clog our table.
*/
- qlen = listen_sock_qlen(lopt);
+ qlen = reqsk_queue_len(queue);
if (qlen >> (lopt->max_qlen_log - 1)) {
- int young = listen_sock_young(lopt) << 1;
+ int young = reqsk_queue_len_young(queue) << 1;
while (thresh > 2) {
if (qlen < young)
@@ -664,7 +664,7 @@ static void reqsk_timer_handler(unsigned long data)
unsigned long timeo;
if (req->num_timeout++ == 0)
- atomic_inc(&lopt->young_dec);
+ atomic_dec(&queue->young);
timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
return;
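The drop heuristic partly visible in the first hunk above can be read in
isolation. The helper below is hypothetical, pulled out only to show the
arithmetic; thresh is the remaining SYNACK retransmit budget and max_qlen_log
the log2 of the backlog size:

/* Worked example: with max_qlen_log = 8 (256 slots), qlen = 200 and
 * young = 20, the backlog is more than half full (200 >> 7 != 0), so the
 * loop runs with young doubled to 40, 80, 160 and lowers thresh by one
 * at each step until qlen < young or thresh reaches 2.
 */
static int synack_retry_threshold(int thresh, int qlen, int young,
				  unsigned int max_qlen_log)
{
	if (qlen >> (max_qlen_log - 1)) {	/* backlog more than half full */
		young <<= 1;
		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	return thresh;
}

In other words, the fuller the queue gets relative to the number of "young"
requests (those that never needed a retransmit), the fewer retransmits old
embryonic connections are granted before being aborted.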
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c3b1f3a0f4cf..0ac1d68dc8a6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -753,7 +753,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
lopt = icsk->icsk_accept_queue.listen_opt;
- if (!lopt || !listen_sock_qlen(lopt))
+ if (!lopt || !reqsk_queue_len(&icsk->icsk_accept_queue))
goto out;
if (bc) {