author		Eric Dumazet <edumazet@google.com>	2015-10-08 19:33:23 -0700
committer	David S. Miller <davem@davemloft.net>	2015-10-12 19:28:22 -0700
commit		ed53d0ab761f5c71d77c8dc05fd19c0a851200db (patch)
tree		e8020f7b24c958497a92b38fbf0dd89e99b8261e /include/net
parent		net: align sk_refcnt on 128 bytes boundary (diff)
net: shrink struct sock and request_sock by 8 bytes
One 32-bit hole follows skc_refcnt; use it.

skc_incoming_cpu can also share a union with request_sock's rcv_wnd.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
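The saving is purely a struct-layout effect, so it can be illustrated outside the kernel. Below is a minimal, standalone C11 sketch (simplified field names and types, not the real include/net headers) of the idea under the assumption of a typical LP64 target: the 4-byte hole that follows the 4-byte refcount is filled by an anonymous union, and the established-socket and request-socket fields overlap, so both outer structures drop 8 bytes while the shared common part keeps its size.

#include <stdint.h>
#include <stdio.h>

/* Layout before the patch: the hole after the refcount is wasted and the
 * flow-hash / window fields live in the outer structures.
 */
struct common_before {
	int	 tx_queue_mapping;
	int	 incoming_cpu;
	uint32_t refcnt;		/* stand-in for atomic_t skc_refcnt */
	/* 4-byte hole: the next member is 8-byte aligned */
	void	*listener;
};

struct full_sock_before {
	struct common_before c;
	uint32_t rxhash;		/* + 4 bytes of tail padding on LP64 */
	void	*receive_queue;
};

struct request_sock_before {
	struct common_before c;
	uint32_t window_clamp;
	uint32_t rcv_wnd;
	void	*rsk_timer;
};

/* Layout after: incoming_cpu shares its slot with the request-only rcv_wnd,
 * and the old hole now holds rxhash (established socket) or window_clamp
 * (request socket). The common part keeps its size; both outer structures
 * shrink by 8 bytes.
 */
struct common_after {
	int	 tx_queue_mapping;
	union {
		int	 incoming_cpu;	/* established sockets */
		uint32_t rcv_wnd;	/* request sockets */
	};
	uint32_t refcnt;
	union {				/* fills the former 4-byte hole */
		uint32_t rxhash;	/* established sockets */
		uint32_t window_clamp;	/* request sockets */
	};
	void	*listener;
};

struct full_sock_after {
	struct common_after c;
	void	*receive_queue;
};

struct request_sock_after {
	struct common_after c;
	void	*rsk_timer;
};

int main(void)
{
	printf("common:       %zu -> %zu bytes\n",
	       sizeof(struct common_before), sizeof(struct common_after));
	printf("full sock:    %zu -> %zu bytes\n",
	       sizeof(struct full_sock_before), sizeof(struct full_sock_after));
	printf("request sock: %zu -> %zu bytes\n",
	       sizeof(struct request_sock_before), sizeof(struct request_sock_after));
	return 0;
}

On a typical x86-64 build this prints 24 -> 24, 40 -> 32 and 40 -> 32. The overlap is safe because the fields are never live at the same time: incoming_cpu and rxhash matter only for full sockets, while rcv_wnd and window_clamp are only used during the request_sock phase, which is the same reasoning behind the unions in the patch below.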
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/request_sock.h |  5 ++---
-rw-r--r--  include/net/sock.h         | 14 +++++++++-----
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 6b818b77d5e5..2e73748956d5 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -51,15 +51,14 @@ struct request_sock {
 #define rsk_refcnt			__req_common.skc_refcnt
 #define rsk_hash			__req_common.skc_hash
 #define rsk_listener			__req_common.skc_listener
+#define rsk_window_clamp		__req_common.skc_window_clamp
+#define rsk_rcv_wnd			__req_common.skc_rcv_wnd
 
 	struct request_sock		*dl_next;
 	u16				mss;
 	u8				num_retrans; /* number of retransmits */
 	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
 	u8				num_timeout:7; /* number of timeouts */
-	/* The following two fields can be easily recomputed I think -AK */
-	u32				window_clamp; /* window clamp at creation time */
-	u32				rcv_wnd;	  /* rcv_wnd offered first time */
 	u32				ts_recent;
 	struct timer_list		rsk_timer;
 	const struct request_sock_ops	*rsk_ops;
diff --git a/include/net/sock.h b/include/net/sock.h
index 65712409464b..19cfe1fc911c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -226,11 +226,18 @@ struct sock_common {
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	int			skc_tx_queue_mapping;
-	int			skc_incoming_cpu;
+	union {
+		int		skc_incoming_cpu;
+		u32		skc_rcv_wnd;
+	};
 
 	atomic_t		skc_refcnt;
 	/* private: */
 	int			skc_dontcopy_end[0];
+	union {
+		u32		skc_rxhash;
+		u32		skc_window_clamp;
+	};
 	/* public: */
 };
 
@@ -287,7 +294,6 @@ struct cg_proto;
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
- *	@sk_rxhash: flow hash received from netif layer
  *	@sk_txhash: computed flow hash for use on transmit
  *	@sk_filter: socket filtering instructions
  *	@sk_timer: sock cleanup timer
@@ -346,6 +352,7 @@ struct sock {
 #define sk_cookie		__sk_common.skc_cookie
 #define sk_incoming_cpu		__sk_common.skc_incoming_cpu
 #define sk_flags		__sk_common.skc_flags
+#define sk_rxhash		__sk_common.skc_rxhash
 
 	socket_lock_t		sk_lock;
 	struct sk_buff_head	sk_receive_queue;
@@ -365,9 +372,6 @@ struct sock {
 	} sk_backlog;
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 	int			sk_forward_alloc;
-#ifdef CONFIG_RPS
-	__u32			sk_rxhash;
-#endif
 	__u32			sk_txhash;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL