author    David S. Miller <davem@davemloft.net>  2015-09-29 16:53:10 -0700
committer David S. Miller <davem@davemloft.net>  2015-09-29 16:53:10 -0700
commit    e6934f3ec00b04234acb24a1a2c28af59763d3b5 (patch)
tree      335da15ef581d750ada2ba11e6ca6900ba82ae86 /include
parent    Merge branch 'ipv4-routing-cleanups' (diff)
parent    tcp: prepare fastopen code for upcoming listener changes (diff)
Merge branch 'listener-refactoring-preparations'
Eric Dumazet says:

====================
tcp: listener refactoring preparations

This patch series makes changes to the TCP/DCCP stacks so that we can
switch the listener code to lockless mode. This is done by marking the
listener socket const in all appropriate paths.

The FastOpen code had to be changed so that it no longer dynamically
allocates a very small structure, keeping the code simpler for the
changes that follow.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
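The central simplification is easiest to see outside the diff. Below is a
minimal userspace C sketch of the FastOpen change: a tiny structure that used
to be kzalloc()'d on demand becomes an embedded member, so the -ENOMEM failure
path disappears and tuning the queue is a plain assignment. The *_sketch names
and simplified fields are illustrative stand-ins, not the kernel definitions;
the real helper is the fastopen_queue_tune() visible in the diff below.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by this series. */
struct fastopen_queue_sketch {
	int max_qlen;			/* 0 means TCP Fast Open not enabled */
};

struct request_sock_queue_sketch {
	/* before: struct fastopen_queue_sketch *fastopenq;
	 *	   NULL until TFO was first enabled, allocated on demand,
	 *	   so enabling TFO could fail with -ENOMEM.
	 * after:  the member is always present.
	 */
	struct fastopen_queue_sketch fastopenq;
};

/* Mirrors the shape of the new fastopen_queue_tune(): no allocation,
 * no failure path, just tune the embedded queue. */
static void fastopen_queue_tune_sketch(struct request_sock_queue_sketch *q,
				       int backlog)
{
	q->fastopenq.max_qlen = backlog;
}

int main(void)
{
	struct request_sock_queue_sketch q = { { 0 } };

	fastopen_queue_tune_sketch(&q, 128);
	printf("TFO %s, max_qlen=%d\n",
	       q.fastopenq.max_qlen ? "enabled" : "disabled",
	       q.fastopenq.max_qlen);
	return 0;
}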
Diffstat (limited to 'include')
-rw-r--r--  include/linux/tcp.h                  22
-rw-r--r--  include/net/inet6_connection_sock.h   2
-rw-r--r--  include/net/inet_connection_sock.h    5
-rw-r--r--  include/net/inet_hashtables.h         2
-rw-r--r--  include/net/request_sock.h           16
-rw-r--r--  include/net/sock.h                    2
-rw-r--r--  include/net/tcp.h                    28

7 files changed, 30 insertions(+), 47 deletions(-)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fcb573be75d9..e442e6e9a365 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -382,25 +382,11 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
tcp_sk(sk)->fastopen_rsk != NULL);
}
-extern void tcp_sock_destruct(struct sock *sk);
-
-static inline int fastopen_init_queue(struct sock *sk, int backlog)
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
{
- struct request_sock_queue *queue =
- &inet_csk(sk)->icsk_accept_queue;
-
- if (queue->fastopenq == NULL) {
- queue->fastopenq = kzalloc(
- sizeof(struct fastopen_queue),
- sk->sk_allocation);
- if (queue->fastopenq == NULL)
- return -ENOMEM;
-
- sk->sk_destruct = tcp_sock_destruct;
- spin_lock_init(&queue->fastopenq->lock);
- }
- queue->fastopenq->max_qlen = backlog;
- return 0;
+ struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+ queue->fastopenq.max_qlen = backlog;
}
static inline void tcp_saved_syn_free(struct tcp_sock *tp)
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 81d937e820c4..79b2a4c09ca6 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -26,7 +26,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax);
struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
- const struct request_sock *req);
+ const struct request_sock *req, u8 proto);
struct request_sock *inet6_csk_search_req(struct sock *sk,
const __be16 rport,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 00c3ced6ee55..ee54f21a8113 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -41,7 +41,7 @@ struct inet_connection_sock_af_ops {
int (*rebuild_header)(struct sock *sk);
void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
- struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
+ struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
u16 net_header_len;
@@ -268,7 +268,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum);
struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
const struct request_sock *req);
-struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ struct sock *newsk,
const struct request_sock *req);
static inline void inet_csk_reqsk_queue_add(struct sock *sk,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b07d126694a7..3fb778d7c875 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -199,7 +199,7 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
}
/* Caller must disable local BH processing. */
-int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(const struct sock *sk, struct sock *child);
void inet_put_port(struct sock *sk);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 181f97f9fe1c..d2544de329bd 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -34,9 +34,9 @@ struct request_sock_ops {
char *slab_name;
int (*rtx_syn_ack)(const struct sock *sk,
struct request_sock *req);
- void (*send_ack)(struct sock *sk, struct sk_buff *skb,
+ void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
- void (*send_reset)(struct sock *sk,
+ void (*send_reset)(const struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req);
void (*syn_ack_timeout)(const struct request_sock *req);
@@ -129,9 +129,8 @@ struct listen_sock {
atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
atomic_t young_dec;
- u8 max_qlen_log ____cacheline_aligned_in_smp;
- u8 synflood_warned;
- /* 2 bytes hole, try to use */
+ u32 max_qlen_log ____cacheline_aligned_in_smp;
+ u32 synflood_warned;
u32 hash_rnd;
u32 nr_table_entries;
struct request_sock *syn_table[0];
@@ -181,11 +180,8 @@ struct request_sock_queue {
struct request_sock *rskq_accept_tail;
u8 rskq_defer_accept;
struct listen_sock *listen_opt;
- struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
- * enabled on this listener. Check
- * max_qlen != 0 in fastopen_queue
- * to determine if TFO is enabled
- * right at this moment.
+ struct fastopen_queue fastopenq; /* Check max_qlen != 0 to determine
+ * if TFO is enabled.
*/
/* temporary alignment, our goal is to get rid of this lock */
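With fastopenq embedded rather than pointed to, the "is TFO enabled?" test
changes shape, as the new comment above notes: a NULL check on the pointer
becomes a max_qlen check on the always-present member. A small sketch of the
predicate, reusing the illustrative stand-in types from the sketch near the
top of this page (again, not the kernel helpers themselves):

/* before: enabled iff the fastopenq pointer had been allocated;
 * after:  the member always exists, so max_qlen alone carries the state. */
static int tfo_enabled_sketch(const struct request_sock_queue_sketch *q)
{
	return q->fastopenq.max_qlen != 0;
}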
diff --git a/include/net/sock.h b/include/net/sock.h
index 94dff7f566f5..dfe2eb8e1132 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -759,7 +759,7 @@ static inline int sk_memalloc_socks(void)
#endif
-static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
+static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask)
{
return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cdbf63d3c5cf..2c7dfe52f473 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -365,8 +365,7 @@ void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
@@ -451,11 +450,11 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-struct sock *tcp_create_openreq_child(struct sock *sk,
+struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,
struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
-struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
@@ -492,8 +491,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
/* syncookies: remember time of last synqueue overflow
* But do not dirty this field too often (once per second is enough)
+ * It is racy as we do not hold a lock, but race is very minor.
*/
-static inline void tcp_synq_overflow(struct sock *sk)
+static inline void tcp_synq_overflow(const struct sock *sk)
{
unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
unsigned long now = jiffies;
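The constified tcp_synq_overflow() above also documents a pattern worth
isolating: a shared timestamp that is deliberately dirtied at most about once
per second, without a lock, because a lost or duplicated update is harmless
for this use. A standalone userspace sketch of that throttled-write idea
follows; the kernel version uses jiffies and tcp_sk(sk)->rx_opt.ts_recent_stamp,
while the names here are hypothetical:

#include <time.h>

static long last_overflow_stamp;	/* shared, intentionally unlocked */

/* Record a synqueue overflow, but write the shared stamp at most once
 * per second. Concurrent callers may race; the worst case is a single
 * redundant or skipped write, which this usage tolerates by design. */
static void note_synq_overflow_sketch(void)
{
	long now = (long)time(NULL);	/* 1-second granularity */

	if (now - last_overflow_stamp > 0)
		last_overflow_stamp = now;
}

int main(void)
{
	note_synq_overflow_sketch();	/* first call records the stamp */
	note_synq_overflow_sketch();	/* same second: write is skipped */
	return 0;
}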
@@ -520,8 +520,7 @@ static inline u32 tcp_cookie_time(void)
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
-__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
- __u16 *mss);
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -534,8 +533,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
- __u16 *mss);
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
@@ -1710,10 +1708,10 @@ struct tcp_request_sock_ops {
const struct sock *sk_listener,
struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
- __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
+ __u32 (*cookie_init_seq)(const struct sk_buff *skb,
__u16 *mss);
#endif
- struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
+ struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
const struct request_sock *req,
bool *strict);
__u32 (*init_seq)(const struct sk_buff *skb);
@@ -1726,14 +1724,16 @@ struct tcp_request_sock_ops {
#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
- struct sock *sk, struct sk_buff *skb,
+ const struct sock *sk, struct sk_buff *skb,
__u16 *mss)
{
- return ops->cookie_init_seq(sk, skb, mss);
+ tcp_synq_overflow(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+ return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
- struct sock *sk, struct sk_buff *skb,
+ const struct sock *sk, struct sk_buff *skb,
__u16 *mss)
{
return 0;
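The last hunk illustrates the refactoring that motivates the dropped socket
argument: side effects every cookie_init_seq implementation performed (marking
the synqueue overflow, bumping LINUX_MIB_SYNCOOKIESSENT) are hoisted into the
common cookie_init_sequence() wrapper, so the per-family callbacks keep only
what actually varies. A generic C sketch of that shape, with hypothetical
names rather than kernel code:

/* Hoist side effects shared by all callbacks into the wrapper, then
 * shrink the callback signature to just the varying input. */
struct ops_sketch {
	unsigned int (*init_seq)(const void *pkt);	/* no ctx needed now */
};

static void note_overflow_sketch(const void *ctx)  { (void)ctx; /* step 1 */ }
static void bump_counter_sketch(const void *ctx)   { (void)ctx; /* step 2 */ }

static unsigned int init_sequence_sketch(const struct ops_sketch *ops,
					 const void *ctx, const void *pkt)
{
	note_overflow_sketch(ctx);	/* was duplicated in every callback */
	bump_counter_sketch(ctx);
	return ops->init_seq(pkt);	/* callback keeps only what varies */
}

static unsigned int demo_init_seq(const void *pkt)
{
	(void)pkt;
	return 42u;	/* arbitrary demo value */
}

int main(void)
{
	const struct ops_sketch ops = { .init_seq = demo_init_seq };

	return init_sequence_sketch(&ops, 0, 0) == 42u ? 0 : 1;
}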