author	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-09 19:33:31 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 15:32:11 -0700
commit	83e3609eba3818f6e18b8bf9442195169ac306b7
tree	b0dd71b7e5ea6e8b17813cf6b3736a1b0f443ab7
parent	[NETFILTER]: Add ctnetlink subsystem
[REQSK]: Move the syn_table destroy from tcp_listen_stop to reqsk_queue_destroy
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/request_sock.h	 5
-rw-r--r--	net/core/request_sock.c	26
-rw-r--r--	net/ipv4/tcp.c	35
3 files changed, 40 insertions(+), 26 deletions(-)
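
For orientation, not part of the commit itself: the patch splits request-queue teardown into the trivial inline __reqsk_queue_destroy() and a new out-of-line reqsk_queue_destroy() that also drains the SYN hash table. The sketch below (kernel context assumed, helper names hypothetical, illustrative only) shows how the two call sites changed in net/ipv4/tcp.c are meant to use them.

/*
 * Illustrative sketch, not part of the patch; kernel context assumed.
 * Mirrors the two call sites changed in net/ipv4/tcp.c below.
 */
#include <net/request_sock.h>

/* tcp_listen_start() error path: reqsk_queue_alloc() has just run and
 * the socket never reached TCP_LISTEN, so no request_sock can be
 * queued yet; freeing listen_opt alone is enough.
 */
static void listen_start_error_teardown(struct request_sock_queue *queue)
{
	__reqsk_queue_destroy(queue);
}

/* tcp_listen_stop(): embryonic (SYN_RECV) requests may still sit in the
 * SYN hash table, so the new out-of-line helper walks it and calls
 * reqsk_free() on each pending request before freeing listen_opt.
 */
static void listen_stop_teardown(struct request_sock_queue *queue)
{
	reqsk_queue_destroy(queue);
}
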
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 72fd6f5e86b1..334717bf9ef6 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -89,6 +89,7 @@ struct listen_sock {
int qlen_young;
int clock_hand;
u32 hash_rnd;
+ u32 nr_table_entries;
struct request_sock *syn_table[0];
};
@@ -129,11 +130,13 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock
return lopt;
}
-static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
+static inline void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
kfree(reqsk_queue_yank_listen_sk(queue));
}
+extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+
static inline struct request_sock *
reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index bb55675f0685..4e99ce5c08f2 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -53,6 +53,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
rwlock_init(&queue->syn_wait_lock);
queue->rskq_accept_head = queue->rskq_accept_head = NULL;
+ lopt->nr_table_entries = nr_table_entries;
write_lock_bh(&queue->syn_wait_lock);
queue->listen_opt = lopt;
@@ -62,3 +63,28 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
}
EXPORT_SYMBOL(reqsk_queue_alloc);
+
+void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+ /* make all the listen_opt local to us */
+ struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
+
+ if (lopt->qlen != 0) {
+ int i;
+
+ for (i = 0; i < lopt->nr_table_entries; i++) {
+ struct request_sock *req;
+
+ while ((req = lopt->syn_table[i]) != NULL) {
+ lopt->syn_table[i] = req->dl_next;
+ lopt->qlen--;
+ reqsk_free(req);
+ }
+ }
+ }
+
+ BUG_TRAP(lopt->qlen == 0);
+ kfree(lopt);
+}
+
+EXPORT_SYMBOL(reqsk_queue_destroy);
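
A side note on the new nr_table_entries field, again not part of the patch: the generic helper above cannot rely on the TCP-specific TCP_SYNQ_HSIZE constant that the old loop in tcp_listen_stop() used, so reqsk_queue_alloc() now records the table size in listen_opt. A hypothetical alloc/destroy pairing (illustrative only, kernel context assumed) would look like:

/*
 * Hypothetical caller, illustrative only: the size passed to
 * reqsk_queue_alloc() is remembered in listen_opt->nr_table_entries,
 * which is what lets reqsk_queue_destroy() above walk syn_table[]
 * without knowing protocol constants such as TCP_SYNQ_HSIZE.
 */
static int example_alloc_and_destroy(struct request_sock_queue *queue,
				     const int nr_table_entries)
{
	int err = reqsk_queue_alloc(queue, nr_table_entries);

	if (err)
		return err;
	/* ... accept or drop connection requests ... */
	reqsk_queue_destroy(queue);
	return 0;
}
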
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d2696af46c70..42a2e2ccd430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -487,7 +487,7 @@ int tcp_listen_start(struct sock *sk)
}
sk->sk_state = TCP_CLOSE;
- reqsk_queue_destroy(&tp->accept_queue);
+ __reqsk_queue_destroy(&tp->accept_queue);
return -EADDRINUSE;
}
@@ -499,38 +499,23 @@ int tcp_listen_start(struct sock *sk)
static void tcp_listen_stop (struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct listen_sock *lopt;
struct request_sock *acc_req;
struct request_sock *req;
- int i;
tcp_delete_keepalive_timer(sk);
/* make all the listen_opt local to us */
- lopt = reqsk_queue_yank_listen_sk(&tp->accept_queue);
acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
- if (lopt->qlen) {
- for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
- while ((req = lopt->syn_table[i]) != NULL) {
- lopt->syn_table[i] = req->dl_next;
- lopt->qlen--;
- reqsk_free(req);
-
- /* Following specs, it would be better either to send FIN
- * (and enter FIN-WAIT-1, it is normal close)
- * or to send active reset (abort).
- * Certainly, it is pretty dangerous while synflood, but it is
- * bad justification for our negligence 8)
- * To be honest, we are not able to make either
- * of the variants now. --ANK
- */
- }
- }
- }
- BUG_TRAP(!lopt->qlen);
-
- kfree(lopt);
+ /* Following specs, it would be better either to send FIN
+ * (and enter FIN-WAIT-1, it is normal close)
+ * or to send active reset (abort).
+ * Certainly, it is pretty dangerous while synflood, but it is
+ * bad justification for our negligence 8)
+ * To be honest, we are not able to make either
+ * of the variants now. --ANK
+ */
+ reqsk_queue_destroy(&tp->accept_queue);
while ((req = acc_req) != NULL) {
struct sock *child = req->sk;