Diffstat:
-rw-r--r--  include/net/sock.h  58
1 file changed, 43 insertions(+), 15 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index d27e748a8259..324b3ea233d6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -44,6 +44,7 @@
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/security.h>
@@ -78,14 +79,17 @@ typedef struct {
spinlock_t slock;
struct sock_iocb *owner;
wait_queue_head_t wq;
+ /*
+ * We express the mutex-alike socket_lock semantics
+ * to the lock validator by explicitly managing
+ * the slock as a lock variant (in addition to
+ * the slock itself):
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} socket_lock_t;
-#define sock_lock_init(__sk) \
-do { spin_lock_init(&((__sk)->sk_lock.slock)); \
- (__sk)->sk_lock.owner = NULL; \
- init_waitqueue_head(&((__sk)->sk_lock.wq)); \
-} while(0)
-
struct sock;
struct proto;
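
With the new dep_map member, initialization has to do more than the removed header-level sock_lock_init() macro could, so it presumably moves next to the core socket code (net/core/sock.c or similar). A minimal sketch of what such an initializer might look like, assuming a per-family lock_class_key array and the modern four-argument lockdep_init_map() form; the helper name, key array, and placement are illustrative, not taken from this patch:

static struct lock_class_key af_family_keys[AF_MAX];

static void sock_lock_init_sketch(struct sock *sk)
{
	spin_lock_init(&sk->sk_lock.slock);
	sk->sk_lock.owner = NULL;
	init_waitqueue_head(&sk->sk_lock.wq);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Register the mutex-alike lock variant with the validator.
	 * Modern four-argument form; older kernels omit the subclass.
	 */
	lockdep_init_map(&sk->sk_lock.dep_map, "sk_lock",
			 &af_family_keys[sk->sk_family], 0);
#endif
}
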
@@ -131,6 +135,7 @@ struct sock_common {
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_write_queue: Packet sending queue
+ * @sk_async_wait_queue: DMA copied packets
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
@@ -139,6 +144,7 @@ struct sock_common {
* @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
* @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
+ * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -204,11 +210,13 @@ struct sock {
atomic_t sk_omem_alloc;
struct sk_buff_head sk_receive_queue;
struct sk_buff_head sk_write_queue;
+ struct sk_buff_head sk_async_wait_queue;
int sk_wmem_queued;
int sk_forward_alloc;
gfp_t sk_allocation;
int sk_sndbuf;
int sk_route_caps;
+ int sk_gso_type;
int sk_rcvlowat;
unsigned long sk_flags;
unsigned long sk_lingertime;
@@ -381,7 +389,6 @@ enum sock_flags {
SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
SOCK_DBG, /* %SO_DEBUG setting */
SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
- SOCK_NO_LARGESEND, /* whether to sent large segments or not */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};
@@ -744,6 +751,9 @@ extern void FASTCALL(release_sock(struct sock *sk));
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
+#define bh_lock_sock_nested(__sk) \
+ spin_lock_nested(&((__sk)->sk_lock.slock), \
+ SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
extern struct sock *sk_alloc(int family,
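
bh_lock_sock_nested() exists because lockdep treats every socket slock of a given class as the same lock: a softirq path that legitimately holds two of them at once (say a parent socket and a freshly created child) would otherwise be reported as recursive locking. A hedged illustration of the intended call pattern; the two-socket function and its parameter names are made up for the example:

static void example_two_socket_path(struct sock *parent, struct sock *child)
{
	bh_lock_sock(parent);		/* outer acquisition, depth 0 */
	bh_lock_sock_nested(child);	/* same lock class, so tell lockdep
					 * it is SINGLE_DEPTH_NESTING */

	/* ... work that needs both sockets ... */

	bh_unlock_sock(child);
	bh_unlock_sock(parent);
}
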
@@ -870,10 +880,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns,
filter->len);
- if (!pkt_len)
- err = -EPERM;
- else
- skb_trim(skb, pkt_len);
+ err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
if (needlock)
@@ -1027,13 +1034,22 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+static inline int sk_can_gso(const struct sock *sk)
+{
+ return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+}
+
static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
__sk_dst_set(sk, dst);
sk->sk_route_caps = dst->dev->features;
- if (sk->sk_route_caps & NETIF_F_TSO) {
- if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
- sk->sk_route_caps &= ~NETIF_F_TSO;
+ if (sk->sk_route_caps & NETIF_F_GSO)
+ sk->sk_route_caps |= NETIF_F_GSO_MASK;
+ if (sk_can_gso(sk)) {
+ if (dst->header_len)
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ else
+ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
}
}
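
sk_can_gso() defers to net_gso_ok(), a helper from linux/netdevice.h that is not part of this hunk. Presumably it checks that the feature bit corresponding to the socket's GSO type is present in the route capabilities, roughly along these lines (a sketch, not the authoritative definition):

static inline int net_gso_ok_sketch(int features, int gso_type)
{
	/* each SKB_GSO_* type maps to a NETIF_F_* feature bit */
	int feature = gso_type << NETIF_F_GSO_SHIFT;

	return (features & feature) == feature;
}

That also explains the new logic in sk_setup_caps(): a device advertising NETIF_F_GSO can presumably segment any GSO type in software, so the whole NETIF_F_GSO_MASK is set; and when GSO is usable, NETIF_F_SG and NETIF_F_HW_CSUM can be implied because the GSO engine handles scatter-gather and checksumming itself. A route with a nonzero header_len (encapsulation) still disables it.
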
@@ -1266,15 +1282,27 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
* @skb: socket buffer to eat
+ * @copied_early: flag indicating whether DMA operations copied this data early
*
* This routine must be called with interrupts disabled or with the socket
* locked so that the sk_buff queue operation is ok.
*/
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
+#ifdef CONFIG_NET_DMA
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+{
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ if (!copied_early)
+ __kfree_skb(skb);
+ else
+ __skb_queue_tail(&sk->sk_async_wait_queue, skb);
+}
+#else
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
+#endif
extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
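
A hedged usage sketch for the three-argument sk_eat_skb(); the function and variable names below are illustrative. The intended caller is presumably the TCP receive path, which would pass copied_early = 1 when the payload was handed to a DMA engine, so the skb is parked on sk_async_wait_queue until the asynchronous copy completes instead of being freed immediately:

static void example_drain_queue(struct sock *sk, int copied_early)
{
	struct sk_buff *skb;

	/* socket is locked, as sk_eat_skb() requires */
	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		/* ... copy the data to the user, possibly via DMA ... */
		sk_eat_skb(sk, skb, copied_early);
	}
}
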