Diffstat (limited to 'include')
-rw-r--r--  include/linux/brcmphy.h | 7
-rw-r--r--  include/linux/caif/caif_socket.h | 34
-rw-r--r--  include/linux/eeprom_93cx6.h | 1
-rw-r--r--  include/linux/ethtool.h | 17
-rw-r--r--  include/linux/filter.h | 48
-rw-r--r--  include/linux/if.h | 2
-rw-r--r--  include/linux/if_macvlan.h | 19
-rw-r--r--  include/linux/in.h | 1
-rw-r--r--  include/linux/netdevice.h | 15
-rw-r--r--  include/linux/netpoll.h | 24
-rw-r--r--  include/linux/nl80211.h | 22
-rw-r--r--  include/linux/skbuff.h | 8
-rw-r--r--  include/linux/socket.h | 5
-rw-r--r--  include/linux/u64_stats_sync.h | 140
-rw-r--r--  include/linux/user_namespace.h | 14
-rw-r--r--  include/net/af_unix.h | 4
-rw-r--r--  include/net/caif/caif_dev.h | 8
-rw-r--r--  include/net/caif/caif_layer.h | 6
-rw-r--r--  include/net/caif/caif_spi.h | 153
-rw-r--r--  include/net/caif/cfcnfg.h | 16
-rw-r--r--  include/net/caif/cfsrvl.h | 15
-rw-r--r--  include/net/cfg80211.h | 17
-rw-r--r--  include/net/inet_frag.h | 1
-rw-r--r--  include/net/inet_sock.h | 3
-rw-r--r--  include/net/inetpeer.h | 30
-rw-r--r--  include/net/ip.h | 22
-rw-r--r--  include/net/ipv6.h | 12
-rw-r--r--  include/net/mac80211.h | 113
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netlink.h | 2
-rw-r--r--  include/net/sch_generic.h | 11
-rw-r--r--  include/net/scm.h | 30
-rw-r--r--  include/net/snmp.h | 77
-rw-r--r--  include/net/sock.h | 3
-rw-r--r--  include/net/tcp.h | 26
35 files changed, 760 insertions, 148 deletions
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 7f437ca1ed44..b840a4960282 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
+#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM5482 0x0143bcb0
+#define PHY_ID_BCM5411 0x00206070
+#define PHY_ID_BCM5421 0x002060e0
+#define PHY_ID_BCM5464 0x002060b0
+#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM57780 0x03625d90
#define PHY_BCM_OUI_MASK 0xfffffc00
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
index 2a61eb1beb85..d9cb19b7cff7 100644
--- a/include/linux/caif/caif_socket.h
+++ b/include/linux/caif/caif_socket.h
@@ -62,6 +62,7 @@ enum caif_channel_priority {
* @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing.
* @CAIFPROTO_UTIL: Utility (Psock) channel.
* @CAIFPROTO_RFM: Remote File Manager
+ * @CAIFPROTO_DEBUG: Debug link
*
* This enum defines the CAIF Channel type to be used. This defines
* the service to connect to on the modem.
@@ -72,6 +73,7 @@ enum caif_protocol_type {
CAIFPROTO_DATAGRAM_LOOP,
CAIFPROTO_UTIL,
CAIFPROTO_RFM,
+ CAIFPROTO_DEBUG,
_CAIFPROTO_MAX
};
#define CAIFPROTO_MAX _CAIFPROTO_MAX
@@ -83,6 +85,28 @@ enum caif_protocol_type {
enum caif_at_type {
CAIF_ATTYPE_PLAIN = 2
};
+ /**
+ * enum caif_debug_type - Content selection for debug connection
+ * @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain
+ * both trace and interactive debug.
+ * @CAIF_DEBUG_TRACE: Connection contains trace only.
+ * @CAIF_DEBUG_INTERACTIVE: Connection to interactive debug.
+ */
+enum caif_debug_type {
+ CAIF_DEBUG_TRACE_INTERACTIVE = 0,
+ CAIF_DEBUG_TRACE,
+ CAIF_DEBUG_INTERACTIVE,
+};
+
+/**
+ * enum caif_debug_service - Debug Service Endpoint
+ * @CAIF_RADIO_DEBUG_SERVICE: Debug service on the Radio sub-system
+ * @CAIF_APP_DEBUG_SERVICE: Debug for the applications sub-system
+ */
+enum caif_debug_service {
+ CAIF_RADIO_DEBUG_SERVICE = 1,
+ CAIF_APP_DEBUG_SERVICE
+};
/**
* struct sockaddr_caif - the sockaddr structure for CAIF sockets.
@@ -109,6 +133,12 @@ enum caif_at_type {
*
* @u.rfm.volume: Volume to mount.
*
+ * @u.dbg: Applies when family = CAIFPROTO_DEBUG.
+ *
+ * @u.dbg.type: Type of debug connection to set up
+ * (caif_debug_type).
+ *
+ * @u.dbg.service: Service sub-system to connect (caif_debug_service).
* Description:
* This structure holds the connect parameters used for setting up a
* CAIF Channel. It defines the service to connect to on the modem.
@@ -130,6 +160,10 @@ struct sockaddr_caif {
__u32 connection_id;
char volume[16];
} rfm; /* CAIFPROTO_RFM */
+ struct {
+ __u8 type; /* type:enum caif_debug_type */
+ __u8 service; /* service:caif_debug_service */
+ } dbg; /* CAIFPROTO_DEBUG */
} u;
};
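
The new u.dbg addressing makes the debug link reachable from ordinary sockets. Below is a minimal user-space sketch, assuming the exported CAIF headers are installed; AF_CAIF may not yet be known to libc, so its value from linux/socket.h is supplied as a fallback, and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

#ifndef AF_CAIF
#define AF_CAIF 37	/* value from linux/socket.h */
#endif

int main(void)
{
	struct sockaddr_caif addr;
	int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DEBUG);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.dbg.type = CAIF_DEBUG_TRACE;		/* trace only */
	addr.u.dbg.service = CAIF_RADIO_DEBUG_SERVICE;	/* radio sub-system */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(fd);
	return 0;
}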
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index a55c873e8b66..c4627cbdb8e0 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -30,6 +30,7 @@
#define PCI_EEPROM_WIDTH_93C46 6
#define PCI_EEPROM_WIDTH_93C56 8
#define PCI_EEPROM_WIDTH_93C66 8
+#define PCI_EEPROM_WIDTH_93C86 8
#define PCI_EEPROM_WIDTH_OPCODE 3
#define PCI_EEPROM_WRITE_OPCODE 0x05
#define PCI_EEPROM_READ_OPCODE 0x06
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 2c8af093d8b3..c1be61f3938b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -384,6 +384,15 @@ struct ethtool_rxnfc {
__u32 rule_locs[0];
};
+struct ethtool_rxfh_indir {
+ __u32 cmd;
+ /* On entry, this is the array size of the user buffer. On
+ * return from ETHTOOL_GRXFHINDIR, this is the array size of
+ * the hardware indirection table. */
+ __u32 size;
+ __u32 ring_index[0]; /* ring/queue index for each hash value */
+};
+
struct ethtool_rx_ntuple_flow_spec {
__u32 flow_type;
union {
@@ -457,7 +466,7 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data);
u32 ethtool_op_get_ufo(struct net_device *dev);
int ethtool_op_set_ufo(struct net_device *dev, u32 data);
u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data);
+int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
void ethtool_ntuple_flush(struct net_device *dev);
/**
@@ -576,6 +585,10 @@ struct ethtool_ops {
int (*set_rx_ntuple)(struct net_device *,
struct ethtool_rx_ntuple *);
int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
+ int (*get_rxfh_indir)(struct net_device *,
+ struct ethtool_rxfh_indir *);
+ int (*set_rxfh_indir)(struct net_device *,
+ const struct ethtool_rxfh_indir *);
};
#endif /* __KERNEL__ */
@@ -637,6 +650,8 @@ struct ethtool_ops {
#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */
+#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */
+#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
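
A user-space sketch of the new ETHTOOL_GRXFHINDIR command: the caller sizes the trailing ring_index[] array, and the driver writes back its table size and contents. The 128-entry bound is an assumption for illustration only, and behaviour when the buffer is smaller than the hardware table is driver-specific.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

#define MAX_INDIR 128	/* assumed upper bound for this example */

int main(int argc, char **argv)
{
	struct ethtool_rxfh_indir *indir;
	struct ifreq ifr;
	unsigned int i;
	int fd;

	if (argc < 2)
		return 1;

	indir = calloc(1, sizeof(*indir) + MAX_INDIR * sizeof(__u32));
	indir->cmd = ETHTOOL_GRXFHINDIR;
	indir->size = MAX_INDIR;		/* array size of our buffer */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)indir;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		printf("%u hash table entries\n", indir->size);
		for (i = 0; i < indir->size && i < MAX_INDIR; i++)
			printf("hash %u -> ring %u\n", i, indir->ring_index[i]);
	} else {
		perror("ETHTOOL_GRXFHINDIR");
	}
	free(indir);
	return 0;
}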
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 151f5d703b7e..69b43dbea6c6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -91,6 +91,54 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define BPF_TAX 0x00
#define BPF_TXA 0x80
+enum {
+ BPF_S_RET_K = 0,
+ BPF_S_RET_A,
+ BPF_S_ALU_ADD_K,
+ BPF_S_ALU_ADD_X,
+ BPF_S_ALU_SUB_K,
+ BPF_S_ALU_SUB_X,
+ BPF_S_ALU_MUL_K,
+ BPF_S_ALU_MUL_X,
+ BPF_S_ALU_DIV_X,
+ BPF_S_ALU_AND_K,
+ BPF_S_ALU_AND_X,
+ BPF_S_ALU_OR_K,
+ BPF_S_ALU_OR_X,
+ BPF_S_ALU_LSH_K,
+ BPF_S_ALU_LSH_X,
+ BPF_S_ALU_RSH_K,
+ BPF_S_ALU_RSH_X,
+ BPF_S_ALU_NEG,
+ BPF_S_LD_W_ABS,
+ BPF_S_LD_H_ABS,
+ BPF_S_LD_B_ABS,
+ BPF_S_LD_W_LEN,
+ BPF_S_LD_W_IND,
+ BPF_S_LD_H_IND,
+ BPF_S_LD_B_IND,
+ BPF_S_LD_IMM,
+ BPF_S_LDX_W_LEN,
+ BPF_S_LDX_B_MSH,
+ BPF_S_LDX_IMM,
+ BPF_S_MISC_TAX,
+ BPF_S_MISC_TXA,
+ BPF_S_ALU_DIV_K,
+ BPF_S_LD_MEM,
+ BPF_S_LDX_MEM,
+ BPF_S_ST,
+ BPF_S_STX,
+ BPF_S_JMP_JA,
+ BPF_S_JMP_JEQ_K,
+ BPF_S_JMP_JEQ_X,
+ BPF_S_JMP_JGE_K,
+ BPF_S_JMP_JGE_X,
+ BPF_S_JMP_JGT_K,
+ BPF_S_JMP_JGT_X,
+ BPF_S_JMP_JSET_K,
+ BPF_S_JMP_JSET_X,
+};
+
#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif
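
The BPF_S_* values are decoded, internal forms of the classic BPF_CLASS|BPF_OP|BPF_SRC code combinations, letting the filter runtime switch on a dense enum instead of re-masking the raw opcode for every instruction. An illustrative sketch of the kind of translation a checker performs (not the in-tree implementation; only a few cases are shown):

#include <linux/errno.h>
#include <linux/filter.h>

static int bpf_decode_code(const struct sock_filter *ftest)
{
	switch (ftest->code) {
	case BPF_RET | BPF_K:
		return BPF_S_RET_K;
	case BPF_RET | BPF_A:
		return BPF_S_RET_A;
	case BPF_ALU | BPF_ADD | BPF_K:
		return BPF_S_ALU_ADD_K;
	case BPF_LD | BPF_W | BPF_ABS:
		return BPF_S_LD_W_ABS;
	case BPF_JMP | BPF_JEQ | BPF_K:
		return BPF_S_JMP_JEQ_K;
	/* ... one case per BPF_S_* value above ... */
	default:
		return -EINVAL;	/* unknown/illegal instruction */
	}
}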
diff --git a/include/linux/if.h b/include/linux/if.h
index be350e62a905..53558ec59e1b 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -73,6 +73,8 @@
#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
#define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */
#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */
+#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index c26a0e4f0ce8..e24ce6ea1fa3 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/netlink.h>
+#include <linux/u64_stats_sync.h>
#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE)
struct socket *macvtap_get_socket(struct file *);
@@ -27,14 +28,16 @@ struct macvtap_queue;
* struct macvlan_rx_stats - MACVLAN percpu rx stats
* @rx_packets: number of received packets
* @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
+ * @rx_multicast: number of received multicast packets
+ * @syncp: synchronization point for 64bit counters
* @rx_errors: number of errors
*/
struct macvlan_rx_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long multicast;
- unsigned long rx_errors;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ struct u64_stats_sync syncp;
+ unsigned long rx_errors;
};
struct macvlan_dev {
@@ -56,12 +59,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
{
struct macvlan_rx_stats *rx_stats;
- rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
+ rx_stats = this_cpu_ptr(vlan->rx_stats);
if (likely(success)) {
+ u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;;
rx_stats->rx_bytes += len;
if (multicast)
- rx_stats->multicast++;
+ rx_stats->rx_multicast++;
+ u64_stats_update_end(&rx_stats->syncp);
} else {
rx_stats->rx_errors++;
}
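
The writer above pairs with a reader that folds the per-cpu counters and retries whenever it races with an update, as described in u64_stats_sync.h. A sketch of such a reader; the helper name is made up:

#include <linux/if_macvlan.h>
#include <linux/percpu.h>

static void macvlan_fold_rx_stats(const struct macvlan_dev *vlan,
				  u64 *packets, u64 *bytes, u64 *multicast)
{
	int cpu;

	*packets = *bytes = *multicast = 0;
	for_each_possible_cpu(cpu) {
		const struct macvlan_rx_stats *p =
			per_cpu_ptr(vlan->rx_stats, cpu);
		u64 tpackets, tbytes, tmulticast;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			tpackets = p->rx_packets;
			tbytes = p->rx_bytes;
			tmulticast = p->rx_multicast;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		*packets += tpackets;
		*bytes += tbytes;
		*multicast += tmulticast;
	}
}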
diff --git a/include/linux/in.h b/include/linux/in.h
index 583c76f9c30f..41d88a4689af 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -85,6 +85,7 @@ struct in_addr {
#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
#define IP_MINTTL 21
+#define IP_NODEFRAG 22
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4fbccc5f609a..8fa5e5aa879a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -673,7 +673,7 @@ struct netdev_rx_queue {
* 1. Define @ndo_get_stats64 to update a rtnl_link_stats64 structure
* (which should normally be dev->stats64) and return a pointer to
* it. The structure must not be changed asynchronously.
- * 2. Define @ndo_get_stats to update a net_device_stats64 structure
+ * 2. Define @ndo_get_stats to update a net_device_stats structure
* (which should normally be dev->stats) and return a pointer to
* it. The structure may be changed asynchronously only if each
* field is written atomically.
@@ -744,6 +744,8 @@ struct net_device_ops {
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
+ int (*ndo_netpoll_setup)(struct net_device *dev,
+ struct netpoll_info *info);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -863,7 +865,8 @@ struct net_device {
#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+ NETIF_F_TSO6 | NETIF_F_UFO)
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
@@ -977,6 +980,7 @@ struct net_device {
struct netdev_queue rx_queue;
rx_handler_func_t *rx_handler;
+ void *rx_handler_data;
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
@@ -1044,10 +1048,6 @@ struct net_device {
/* mid-layer private */
void *ml_priv;
- /* bridge stuff */
- struct net_bridge_port *br_port;
- /* macvlan */
- struct macvlan_port *macvlan_port;
/* GARP */
struct garp_port *garp_port;
@@ -1710,7 +1710,8 @@ static inline void napi_free_frags(struct napi_struct *napi)
}
extern int netdev_rx_handler_register(struct net_device *dev,
- rx_handler_func_t *rx_handler);
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
extern void netif_nit_deliver(struct sk_buff *skb);
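
With the extra rx_handler_data argument, a stacked driver no longer needs its own pointer in struct net_device: whatever it registers here comes back in the handler through rcu_dereference(skb->dev->rx_handler_data). A sketch with made-up names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_port {
	struct net_device *upperdev;
	/* ... per-port state ... */
};

static struct sk_buff *my_handle_frame(struct sk_buff *skb)
{
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	/* steer the frame to the upper device tracked in port */
	skb->dev = port->upperdev;
	return skb;
}

static int my_port_attach(struct net_device *lowerdev, struct my_port *port)
{
	/* pairs with netdev_rx_handler_unregister(lowerdev) on detach */
	return netdev_rx_handler_register(lowerdev, my_handle_frame, port);
}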
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e9e231215865..413742c92d14 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
+void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
@@ -57,12 +59,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
static inline bool netpoll_rx(struct sk_buff *skb)
{
- struct netpoll_info *npinfo = skb->dev->npinfo;
+ struct netpoll_info *npinfo;
unsigned long flags;
bool ret = false;
+ rcu_read_lock_bh();
+ npinfo = rcu_dereference_bh(skb->dev->npinfo);
+
if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
- return false;
+ goto out;
spin_lock_irqsave(&npinfo->rx_lock, flags);
/* check rx_flags again with the lock held */
@@ -70,12 +75,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
ret = true;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+out:
+ rcu_read_unlock_bh();
return ret;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
- struct netpoll_info *npinfo = skb->dev->npinfo;
+ struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}
@@ -91,7 +98,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
- rcu_read_lock(); /* deal with race on ->npinfo */
if (dev && dev->npinfo) {
spin_lock(&napi->poll_lock);
napi->poll_owner = smp_processor_id();
@@ -108,7 +114,11 @@ static inline void netpoll_poll_unlock(void *have)
napi->poll_owner = -1;
spin_unlock(&napi->poll_lock);
}
- rcu_read_unlock();
+}
+
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+ return irqs_disabled();
}
#else
@@ -134,6 +144,10 @@ static inline void netpoll_poll_unlock(void *have)
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+ return 0;
+}
#endif
#endif
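
The split into __netpoll_setup()/__netpoll_cleanup() exists so that a master device (bridge/bonding style) can mirror its netpoll state onto lower devices from ndo_netpoll_setup. A rough sketch under assumed names (struct my_priv, slave_dev); reference counting and teardown are omitted for brevity:

#include <linux/errno.h>
#include <linux/netpoll.h>
#include <linux/slab.h>

struct my_priv {
	struct net_device *slave_dev;
	struct netpoll *np;
};

static int my_master_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *npinfo)
{
	struct my_priv *priv = netdev_priv(dev);
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	np->dev = priv->slave_dev;	/* forward netpoll traffic to the slave */
	err = __netpoll_setup(np);
	if (err) {
		kfree(np);
		return err;
	}
	priv->np = np;			/* released via __netpoll_cleanup() later */
	return 0;
}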
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 64fb32b93a28..2c8701687336 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -725,6 +725,12 @@ enum nl80211_commands {
* @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations
* connected to this BSS.
*
+ * @NL80211_ATTR_WIPHY_TX_POWER_SETTING: Transmit power setting type. See
+ * &enum nl80211_tx_power_setting for possible values.
+ * @NL80211_ATTR_WIPHY_TX_POWER_LEVEL: Transmit power level in signed mBm units.
+ * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING
+ * for non-automatic settings.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -882,6 +888,9 @@ enum nl80211_attrs {
NL80211_ATTR_AP_ISOLATE,
+ NL80211_ATTR_WIPHY_TX_POWER_SETTING,
+ NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1659,4 +1668,17 @@ enum nl80211_cqm_rssi_threshold_event {
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
};
+
+/**
+ * enum nl80211_tx_power_setting - TX power adjustment
+ * @NL80211_TX_POWER_AUTOMATIC: automatically determine transmit power
+ * @NL80211_TX_POWER_LIMITED: limit TX power by the mBm parameter
+ * @NL80211_TX_POWER_FIXED: fix TX power to the mBm parameter
+ */
+enum nl80211_tx_power_setting {
+ NL80211_TX_POWER_AUTOMATIC,
+ NL80211_TX_POWER_LIMITED,
+ NL80211_TX_POWER_FIXED,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 122d08396e56..ac74ee085d74 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1414,12 +1414,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
*
* Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce average number of cache lines per packet.
+ * get_rps_cpus() for example only accesses one 64-byte aligned block:
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 64
+#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 032a19eb61b1..a2fada9becb6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -24,6 +24,9 @@ struct __kernel_sockaddr_storage {
#include <linux/types.h> /* pid_t */
#include <linux/compiler.h> /* __user */
+struct pid;
+struct cred;
+
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -309,6 +312,8 @@ struct ucred {
#define IPX_TYPE 1
#ifdef __KERNEL__
+extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
+
extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
new file mode 100644
index 000000000000..fa261a0da280
--- /dev/null
+++ b/include/linux/u64_stats_sync.h
@@ -0,0 +1,140 @@
+#ifndef _LINUX_U64_STATS_SYNC_H
+#define _LINUX_U64_STATS_SYNC_H
+
+/*
+ * To properly implement 64bits network statistics on 32bit and 64bit hosts,
+ * we provide a synchronization point, that is a noop on 64bit or UP kernels.
+ *
+ * Key points :
+ * 1) Use a seqcount on SMP 32bits, with low overhead.
+ * 2) Whole thing is a noop on 64bit arches or UP kernels.
+ * 3) Write side must ensure mutual exclusion or one seqcount update could
+ * be lost, thus blocking readers forever.
+ * If this synchronization point is not a mutex, but a spinlock or
+ * spinlock_bh() or disable_bh() :
+ * 3.1) Write side should not sleep.
+ * 3.2) Write side should not allow preemption.
+ * 3.3) If applicable, interrupts should be disabled.
+ *
+ * 4) If reader fetches several counters, there is no guarantee the whole values
+ * are consistent (remember point 1) : this is a noop on 64bit arches anyway)
+ *
+ * 5) readers are allowed to sleep or be preempted/interrupted : They perform
+ * pure reads. But if they have to fetch many values, it's better to not allow
+ * preemptions/interruptions to avoid many retries.
+ *
+ * 6) If counter might be written by an interrupt, readers should block interrupts.
+ * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
+ * read partial values)
+ *
+ * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
+ * u64_stats_fetch_retry_bh() helpers
+ *
+ * Usage :
+ *
+ * Stats producer (writer) should use following template granted it already got
+ * an exclusive access to counters (a lock is already taken, or per cpu
+ * data is used [in a non preemptable context])
+ *
+ * spin_lock_bh(...) or other synchronization to get exclusive access
+ * ...
+ * u64_stats_update_begin(&stats->syncp);
+ * stats->bytes64 += len; // non atomic operation
+ * stats->packets64++; // non atomic operation
+ * u64_stats_update_end(&stats->syncp);
+ *
+ * While a consumer (reader) should use following template to get consistent
+ * snapshot for each variable (but no guarantee on several ones)
+ *
+ * u64 tbytes, tpackets;
+ * unsigned int start;
+ *
+ * do {
+ * start = u64_stats_fetch_begin(&stats->syncp);
+ * tbytes = stats->bytes64; // non atomic operation
+ * tpackets = stats->packets64; // non atomic operation
+ * } while (u64_stats_fetch_retry(&stats->syncp, start));
+ *
+ *
+ * Example of use in drivers/net/loopback.c, using per_cpu containers,
+ * in BH disabled context.
+ */
+#include <linux/seqlock.h>
+
+struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ seqcount_t seq;
+#endif
+};
+
+static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_end(&syncp->seq);
+#endif
+}
+
+static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ preempt_disable();
+#endif
+ return 0;
+#endif
+}
+
+static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ preempt_enable();
+#endif
+ return false;
+#endif
+}
+
+/*
+ * In case softirq handlers can update u64 counters, readers can use following helpers
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable BH.
+ * - 64bit have no problem atomically reading u64 values, irq safe.
+ */
+static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ local_bh_disable();
+#endif
+ return 0;
+#endif
+}
+
+static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ local_bh_enable();
+#endif
+ return false;
+#endif
+}
+
+#endif /* _LINUX_U64_STATS_SYNC_H */
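
A compact illustration of the new API, following the writer/reader templates from the comment above and modelled on the loopback-style per-cpu counters it mentions; the structure and function names are made up:

#include <linux/u64_stats_sync.h>

struct pcpu_lstats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* writer: runs with exclusive access on the local CPU (e.g. BH-disabled xmit path) */
static void lstats_add(struct pcpu_lstats *lb, unsigned int len)
{
	u64_stats_update_begin(&lb->syncp);
	lb->packets++;
	lb->bytes += len;
	u64_stats_update_end(&lb->syncp);
}

/* reader: may run on any CPU, retries if it raced with the writer */
static void lstats_read(const struct pcpu_lstats *lb, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&lb->syncp);
		*packets = lb->packets;
		*bytes = lb->bytes;
	} while (u64_stats_fetch_retry(&lb->syncp, start));
}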
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index cc4f45361dbb..8178156711f9 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -36,6 +36,9 @@ static inline void put_user_ns(struct user_namespace *ns)
kref_put(&ns->kref, free_user_ns);
}
+uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid);
+gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid);
+
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -52,6 +55,17 @@ static inline void put_user_ns(struct user_namespace *ns)
{
}
+static inline uid_t user_ns_map_uid(struct user_namespace *to,
+ const struct cred *cred, uid_t uid)
+{
+ return uid;
+}
+static inline gid_t user_ns_map_gid(struct user_namespace *to,
+ const struct cred *cred, gid_t gid)
+{
+ return gid;
+}
+
#endif
#endif /* _LINUX_USER_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 20725e213aee..90c9e2872f27 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -23,7 +23,8 @@ struct unix_address {
};
struct unix_skb_parms {
- struct ucred creds; /* Skb credentials */
+ struct pid *pid; /* Skb credentials */
+ const struct cred *cred;
struct scm_fp_list *fp; /* Passed files */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Security ID */
@@ -31,7 +32,6 @@ struct unix_skb_parms {
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
#define UNIXSID(skb) (&UNIXCB((skb)).secid)
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 318ab9478a44..6da573c75d54 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -50,6 +50,9 @@ struct caif_connect_request {
* @client_layer: User implementation of client layer. This layer
* MUST have receive and control callback functions
* implemented.
+ * @ifindex: Link layer interface index used for this connection.
+ * @headroom: Head room needed by CAIF protocol.
+ * @tailroom: Tail room needed by CAIF protocol.
*
* This function connects a CAIF channel. The Client must implement
* the struct cflayer. This layer represents the Client layer and holds
@@ -59,8 +62,9 @@ struct caif_connect_request {
* E.g. CAIF Socket will call this function for each socket it connects
* and have one client_layer instance for each socket.
*/
-int caif_connect_client(struct caif_connect_request *config,
- struct cflayer *client_layer);
+int caif_connect_client(struct caif_connect_request *conn_req,
+ struct cflayer *client_layer, int *ifindex,
+ int *headroom, int *tailroom);
/**
* caif_disconnect_client - Disconnects a client from the CAIF stack.
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 25c472f0e5b8..c8b07a904e78 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -15,14 +15,8 @@ struct cfpktq;
struct caif_payload_info;
struct caif_packet_funcs;
-#define CAIF_MAX_FRAMESIZE 4096
-#define CAIF_MAX_PAYLOAD_SIZE (4096 - 64)
-#define CAIF_NEEDED_HEADROOM (10)
-#define CAIF_NEEDED_TAILROOM (2)
#define CAIF_LAYER_NAME_SZ 16
-#define CAIF_SUCCESS 1
-#define CAIF_FAILURE 0
/**
* caif_assert() - Assert function for CAIF.
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
new file mode 100644
index 000000000000..ce4570dff020
--- /dev/null
+++ b/include/net/caif/caif_spi.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_SPI_H_
+#define CAIF_SPI_H_
+
+#include <net/caif/caif_device.h>
+
+#define SPI_CMD_WR 0x00
+#define SPI_CMD_RD 0x01
+#define SPI_CMD_EOT 0x02
+#define SPI_CMD_IND 0x04
+
+#define SPI_DMA_BUF_LEN 8192
+
+#define WL_SZ 2 /* 16 bits. */
+#define SPI_CMD_SZ 4 /* 32 bits. */
+#define SPI_IND_SZ 4 /* 32 bits. */
+
+#define SPI_XFER 0
+#define SPI_SS_ON 1
+#define SPI_SS_OFF 2
+#define SPI_TERMINATE 3
+
+/* Minimum time between different levels is 50 microseconds. */
+#define MIN_TRANSITION_TIME_USEC 50
+
+/* Defines for calculating duration of SPI transfers for a particular
+ * number of bytes.
+ */
+#define SPI_MASTER_CLK_MHZ 13
+#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
+
+/* Normally this should be aligned on the modem in order to benefit from full
+ * duplex transfers. However a size of 8188 provokes errors when running with
+ * the modem. These errors occur when packet sizes approach 4 kB of data.
+ */
+#define CAIF_MAX_SPI_FRAME 4092
+
+/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
+ * This number should correspond with the modem setting. The application side
+ * CAIF accepts any number of embedded downlink CAIF frames.
+ */
+#define CAIF_MAX_SPI_PKTS 9
+
+/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
+ * debugging. Both TX and RX buffers will be filled before the transfer.
+ */
+#define CFSPI_DBG_PREFILL 0
+
+/* Structure describing a SPI transfer. */
+struct cfspi_xfer {
+ u16 tx_dma_len;
+ u16 rx_dma_len;
+ void *va_tx;
+ dma_addr_t pa_tx;
+ void *va_rx;
+ dma_addr_t pa_rx;
+};
+
+/* Structure implemented by the SPI interface. */
+struct cfspi_ifc {
+ void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
+ void (*xfer_done_cb) (struct cfspi_ifc *ifc);
+ void *priv;
+};
+
+/* Structure implemented by SPI clients. */
+struct cfspi_dev {
+ int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
+ void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
+ struct cfspi_ifc *ifc;
+ char *name;
+ u32 clk_mhz;
+ void *priv;
+};
+
+/* Enumeration describing the CAIF SPI state. */
+enum cfspi_state {
+ CFSPI_STATE_WAITING = 0,
+ CFSPI_STATE_AWAKE,
+ CFSPI_STATE_FETCH_PKT,
+ CFSPI_STATE_GET_NEXT,
+ CFSPI_STATE_INIT_XFER,
+ CFSPI_STATE_WAIT_ACTIVE,
+ CFSPI_STATE_SIG_ACTIVE,
+ CFSPI_STATE_WAIT_XFER_DONE,
+ CFSPI_STATE_XFER_DONE,
+ CFSPI_STATE_WAIT_INACTIVE,
+ CFSPI_STATE_SIG_INACTIVE,
+ CFSPI_STATE_DELIVER_PKT,
+ CFSPI_STATE_MAX,
+};
+
+/* Structure implemented by SPI physical interfaces. */
+struct cfspi {
+ struct caif_dev_common cfdev;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct sk_buff_head qhead;
+ struct sk_buff_head chead;
+ u16 cmd;
+ u16 tx_cpck_len;
+ u16 tx_npck_len;
+ u16 rx_cpck_len;
+ u16 rx_npck_len;
+ struct cfspi_ifc ifc;
+ struct cfspi_xfer xfer;
+ struct cfspi_dev *dev;
+ unsigned long state;
+ struct work_struct work;
+ struct workqueue_struct *wq;
+ struct list_head list;
+ int flow_off_sent;
+ u32 qd_low_mark;
+ u32 qd_high_mark;
+ struct completion comp;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+ bool flow_stop;
+#ifdef CONFIG_DEBUG_FS
+ enum cfspi_state dbg_state;
+ u16 pcmd;
+ u16 tx_ppck_len;
+ u16 rx_ppck_len;
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_state;
+ struct dentry *dbgfs_frame;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+extern int spi_frm_align;
+extern int spi_up_head_align;
+extern int spi_up_tail_align;
+extern int spi_down_head_align;
+extern int spi_down_tail_align;
+extern struct platform_driver cfspi_spi_driver;
+
+void cfspi_dbg_state(struct cfspi *cfspi, int state);
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_xmitlen(struct cfspi *cfspi);
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_spi_remove(struct platform_device *pdev);
+int cfspi_spi_probe(struct platform_device *pdev);
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_xmitlen(struct cfspi *cfspi);
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+void cfspi_xfer(struct work_struct *work);
+
+#endif /* CAIF_SPI_H_ */
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 9fc2fc20b884..bd646faffa47 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -7,6 +7,7 @@
#ifndef CFCNFG_H_
#define CFCNFG_H_
#include <linux/spinlock.h>
+#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfctrl.h>
@@ -73,8 +74,8 @@ void cfcnfg_remove(struct cfcnfg *cfg);
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
- void *dev, struct cflayer *phy_layer, u16 *phyid,
- enum cfcnfg_phy_preference pref,
+ struct net_device *dev, struct cflayer *phy_layer,
+ u16 *phyid, enum cfcnfg_phy_preference pref,
bool fcs, bool stx);
/**
@@ -114,11 +115,18 @@ void cfcnfg_release_adap_layer(struct cflayer *adap_layer);
* @param: Link setup parameters.
* @adap_layer: Specify the adaptation layer; the receive and
* flow-control functions MUST be set in the structure.
- *
+ * @ifindex: Link layer interface index used for this connection.
+ * @proto_head: Protocol head-space needed by CAIF protocol,
+ * excluding link layer.
+ * @proto_tail: Protocol tail-space needed by CAIF protocol,
+ * excluding link layer.
*/
int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
struct cfctrl_link_param *param,
- struct cflayer *adap_layer);
+ struct cflayer *adap_layer,
+ int *ifindex,
+ int *proto_head,
+ int *proto_tail);
/**
* cfcnfg_get_phyid() - Get physical ID, given type.
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index 2dc9eb193ecf..b1fa87ee0992 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -16,6 +16,8 @@ struct cfsrvl {
bool open;
bool phy_flow_on;
bool modem_flow_on;
+ bool supports_flowctrl;
+ void (*release)(struct kref *);
struct dev_info dev_info;
struct kref ref;
};
@@ -25,13 +27,15 @@ struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
-struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
+ int mtu_size);
struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
void cfservl_destroy(struct cflayer *layer);
void cfsrvl_init(struct cfsrvl *service,
- u8 channel_id,
- struct dev_info *dev_info);
+ u8 channel_id,
+ struct dev_info *dev_info,
+ bool supports_flowctrl);
bool cfsrvl_ready(struct cfsrvl *service, int *err);
u8 cfsrvl_getphyid(struct cflayer *layer);
@@ -50,7 +54,10 @@ static inline void cfsrvl_put(struct cflayer *layr)
if (layr == NULL)
return;
s = container_of(layr, struct cfsrvl, layer);
- kref_put(&s->ref, cfsrvl_release);
+
+ WARN_ON(!s->release);
+ if (s->release)
+ kref_put(&s->ref, s->release);
}
#endif /* CFSRVL_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a56bac1e69e0..168fe530b214 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -810,6 +810,7 @@ struct cfg80211_disassoc_request {
* @beacon_interval: beacon interval to use
* @privacy: this is a protected network, keys will be configured
* after joining
+ * @basic_rates: bitmap of basic rates to use when creating the IBSS
*/
struct cfg80211_ibss_params {
u8 *ssid;
@@ -818,6 +819,7 @@ struct cfg80211_ibss_params {
u8 *ie;
u8 ssid_len, ie_len;
u16 beacon_interval;
+ u32 basic_rates;
bool channel_fixed;
bool privacy;
};
@@ -873,19 +875,6 @@ enum wiphy_params_flags {
WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
};
-/**
- * enum tx_power_setting - TX power adjustment
- *
- * @TX_POWER_AUTOMATIC: the dbm parameter is ignored
- * @TX_POWER_LIMITED: limit TX power by the dbm parameter
- * @TX_POWER_FIXED: fix TX power to the dbm parameter
- */
-enum tx_power_setting {
- TX_POWER_AUTOMATIC,
- TX_POWER_LIMITED,
- TX_POWER_FIXED,
-};
-
/*
* cfg80211_bitrate_mask - masks for bitrate control
*/
@@ -1147,7 +1136,7 @@ struct cfg80211_ops {
int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
int (*set_tx_power)(struct wiphy *wiphy,
- enum tx_power_setting type, int dbm);
+ enum nl80211_tx_power_setting type, int mbm);
int (*get_tx_power)(struct wiphy *wiphy, int *dbm);
int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev,
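
A sketch of a driver-side .set_tx_power callback under the new signature: the requested level now arrives in mBm (1/100 dBm) together with an nl80211_tx_power_setting selector. The my_wiphy_priv layout is a placeholder, not a real driver structure:

#include <linux/errno.h>
#include <net/cfg80211.h>

struct my_wiphy_priv {
	bool power_fixed;
	bool power_limited;
	int power_dbm;
};

static int my_set_tx_power(struct wiphy *wiphy,
			   enum nl80211_tx_power_setting type, int mbm)
{
	struct my_wiphy_priv *priv = wiphy_priv(wiphy);
	int dbm = mbm / 100;	/* 100 mBm per dBm */

	switch (type) {
	case NL80211_TX_POWER_AUTOMATIC:
		priv->power_fixed = false;
		priv->power_limited = false;
		break;
	case NL80211_TX_POWER_LIMITED:
		priv->power_limited = true;
		priv->power_fixed = false;
		priv->power_dbm = dbm;
		break;
	case NL80211_TX_POWER_FIXED:
		priv->power_fixed = true;
		priv->power_limited = false;
		priv->power_dbm = dbm;
		break;
	default:
		return -EINVAL;
	}
	/* program the hardware from priv here */
	return 0;
}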
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 39f2dc943908..16ff29a7bb30 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -20,6 +20,7 @@ struct inet_frag_queue {
atomic_t refcnt;
struct timer_list timer; /* when will this queue expire? */
struct sk_buff *fragments; /* list of received fragments */
+ struct sk_buff *fragments_tail;
ktime_t stamp;
int len; /* total length of orig datagram */
int meat;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1653de515cee..1989cfd7405f 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -137,7 +137,8 @@ struct inet_sock {
hdrincl:1,
mc_loop:1,
transparent:1,
- mc_all:1;
+ mc_all:1,
+ nodefrag:1;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist *mc_list;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 87b1df0d4d8c..417d0c894f29 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -22,10 +22,21 @@ struct inet_peer {
__u32 dtime; /* the time of last use of not
* referenced entries */
atomic_t refcnt;
- atomic_t rid; /* Frag reception counter */
- atomic_t ip_id_count; /* IP ID for the next packet */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+ * We can share memory with rcu_head to keep inet_peer small
+ * (less than 64 bytes)
+ */
+ union {
+ struct {
+ atomic_t rid; /* Frag reception counter */
+ atomic_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ };
+ struct rcu_head rcu;
+ };
};
void inet_initpeers(void) __init;
@@ -36,10 +47,21 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create);
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
+/*
+ * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+ */
+static inline void inet_peer_refcheck(const struct inet_peer *p)
+{
+ WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
+}
+
+
/* can be called with or without local BH being disabled */
static inline __u16 inet_getid(struct inet_peer *p, int more)
{
more++;
+ inet_peer_refcheck(p);
return atomic_add_return(more, &p->ip_id_count) - more;
}
diff --git a/include/net/ip.h b/include/net/ip.h
index d52f01180361..890f9725d681 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -165,12 +165,12 @@ struct ipv4_config {
};
extern struct ipv4_config ipv4_config;
-#define IP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.ip_statistics, field)
-#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS_BH((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS_BH((net)->mib.ip_statistics, field, val)
+#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
+#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
+#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
+#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
@@ -178,7 +178,15 @@ extern struct ipv4_config ipv4_config;
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize);
+#if BITS_PER_LONG==32
+extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+#else
+static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
+{
+ return snmp_fold_field(mib, offt);
+}
+#endif
+extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
extern void snmp_mib_free(void __percpu *ptr[2]);
extern struct local_ports {
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f5808d596aab..1f8412410998 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -136,17 +136,17 @@ extern struct ctl_path net_ipv6_ctl_path[];
/* MIBs */
#define IP6_INC_STATS(net, idev,field) \
- _DEVINC(net, ipv6, , idev, field)
+ _DEVINC(net, ipv6, 64, idev, field)
#define IP6_INC_STATS_BH(net, idev,field) \
- _DEVINC(net, ipv6, _BH, idev, field)
+ _DEVINC(net, ipv6, 64_BH, idev, field)
#define IP6_ADD_STATS(net, idev,field,val) \
- _DEVADD(net, ipv6, , idev, field, val)
+ _DEVADD(net, ipv6, 64, idev, field, val)
#define IP6_ADD_STATS_BH(net, idev,field,val) \
- _DEVADD(net, ipv6, _BH, idev, field, val)
+ _DEVADD(net, ipv6, 64_BH, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev,field,val) \
- _DEVUPD(net, ipv6, , idev, field, val)
+ _DEVUPD(net, ipv6, 64, idev, field, val)
#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \
- _DEVUPD(net, ipv6, _BH, idev, field, val)
+ _DEVUPD(net, ipv6, 64_BH, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field) \
_DEVINC(net, icmpv6, , idev, field)
#define ICMP6_INC_STATS_BH(net, idev, field) \
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index abb3b1a9ddc9..7f256e23c57f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -19,7 +19,6 @@
#include <linux/wireless.h>
#include <linux/device.h>
#include <linux/ieee80211.h>
-#include <linux/inetdevice.h>
#include <net/cfg80211.h>
/**
@@ -147,6 +146,7 @@ struct ieee80211_low_level_stats {
* enabled/disabled (beaconing modes)
* @BSS_CHANGED_CQM: Connection quality monitor config changed
* @BSS_CHANGED_IBSS: IBSS join status changed
+ * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -161,10 +161,18 @@ enum ieee80211_bss_change {
BSS_CHANGED_BEACON_ENABLED = 1<<9,
BSS_CHANGED_CQM = 1<<10,
BSS_CHANGED_IBSS = 1<<11,
+ BSS_CHANGED_ARP_FILTER = 1<<12,
/* when adding here, make sure to change ieee80211_reconfig */
};
+/*
+ * The maximum number of IPv4 addresses listed for ARP filtering. If the number
+ * of addresses for an interface increase beyond this value, hardware ARP
+ * filtering will be disabled.
+ */
+#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
+
/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
@@ -200,6 +208,15 @@ enum ieee80211_bss_change {
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
+ * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
+ * hardware may filter ARP queries targeted for other addresses than listed
+ * here. The driver must allow ARP queries targeted for all addresses listed
+ * here to pass through. An empty list implies no ARP queries need to pass.
+ * @arp_addr_cnt: Number of addresses currently on the list.
+ * @arp_filter_enabled: Enable ARP filtering - if enabled, the hardware may
+ * filter ARP queries based on the @arp_addr_list, if disabled, the
+ * hardware must not perform any ARP filtering. Note, that the filter will
+ * be enabled also in promiscuous mode.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -220,6 +237,9 @@ struct ieee80211_bss_conf {
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
enum nl80211_channel_type channel_type;
+ __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ u8 arp_addr_cnt;
+ bool arp_filter_enabled;
};
/**
@@ -675,9 +695,6 @@ enum ieee80211_smps_mode {
* @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the
* powersave documentation below. This variable is valid only when
* the CONF_PS flag is set.
- * @dynamic_ps_forced_timeout: The dynamic powersave timeout (in ms) configured
- * by cfg80211 (essentially, wext) If set, this value overrules the value
- * chosen by mac80211 based on ps qos network latency.
*
* @power_level: requested transmit power (in dBm)
*
@@ -697,7 +714,7 @@ enum ieee80211_smps_mode {
*/
struct ieee80211_conf {
u32 flags;
- int power_level, dynamic_ps_timeout, dynamic_ps_forced_timeout;
+ int power_level, dynamic_ps_timeout;
int max_sleep_period;
u16 listen_interval;
@@ -1254,6 +1271,15 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* dynamic PS feature in stack and will just keep %IEEE80211_CONF_PS
* enabled whenever user has enabled powersave.
*
+ * Some hardware need to toggle a single shared antenna between WLAN and
+ * Bluetooth to facilitate co-existence. These types of hardware set
+ * limitations on the use of host controlled dynamic powersave whenever there
+ * is simultaneous WLAN and Bluetooth traffic. For these types of hardware, the
+ * driver may request temporarily going into full power save, in order to
+ * enable toggling the antenna between BT and WLAN. If the driver requests
+ * disabling dynamic powersave, the @dynamic_ps_timeout value will be
+ * temporarily set to zero until the driver re-enables dynamic powersave.
+ *
* Driver informs U-APSD client support by enabling
* %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
* uapsd paramater in conf_tx() operation. Hardware needs to send the QoS
@@ -1445,7 +1471,7 @@ enum ieee80211_filter_flags {
*
* Note that drivers MUST be able to deal with a TX aggregation
* session being stopped even before they OK'ed starting it by
- * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer
+ * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer
* might receive the addBA frame and send a delBA right away!
*
* @IEEE80211_AMPDU_RX_START: start Rx aggregation
@@ -1529,16 +1555,6 @@ enum ieee80211_ampdu_mlme_action {
* of the bss parameters has changed when a call is made. The callback
* can sleep.
*
- * @configure_arp_filter: Configuration function for hardware ARP query filter.
- * This function is called with all the IP addresses configured to the
- * interface as argument - all ARP queries targeted to any of these
- * addresses must pass through. If the hardware filter does not support
- * enought addresses, hardware filtering must be disabled. The ifa_list
- * argument may be NULL, indicating that filtering must be disabled.
- * This function is called upon association complete with current
- * address(es), and while associated whenever the IP address(es) change.
- * The callback can sleep.
- *
* @prepare_multicast: Prepare for multicast filter configuration.
* This callback is optional, and its return value is passed
* to configure_filter(). This callback must be atomic.
@@ -1640,7 +1656,7 @@ enum ieee80211_ampdu_mlme_action {
* is the first frame we expect to perform the action on. Notice
* that TX/RX_STOP can pass NULL for this parameter.
* Returns a negative error code on failure.
- * The callback must be atomic.
+ * The callback can sleep.
*
* @get_survey: Return per-channel survey information
*
@@ -1678,9 +1694,6 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u32 changed);
- int (*configure_arp_filter)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct in_ifaddr *ifa_list);
u64 (*prepare_multicast)(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
void (*configure_filter)(struct ieee80211_hw *hw,
@@ -2314,25 +2327,14 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
/**
- * ieee80211_start_tx_ba_cb - low level driver ready to aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- */
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-
-/**
* ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
* @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the TID to BA on.
*
* This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session. It can be called
+ * from any context.
*/
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
u16 tid);
@@ -2351,25 +2353,14 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
/**
- * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the desired TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- */
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
-
-/**
* ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
* @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the desired TID to BA on.
*
* This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session tear down. It
+ * can be called from any context.
*/
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
u16 tid);
@@ -2465,6 +2456,36 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
void ieee80211_connection_loss(struct ieee80211_vif *vif);
/**
+ * ieee80211_disable_dyn_ps - force mac80211 to temporarily disable dynamic psm
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Some hardware require full power save to manage simultaneous BT traffic
+ * on the WLAN frequency. Full PSM is required periodically, whenever there are
+ * burst of BT traffic. The hardware gets information of BT traffic via
+ * hardware co-existence lines, and consequentially requests mac80211 to
+ * (temporarily) enter full psm.
+ * This function will only temporarily disable dynamic PS, not enable PSM if
+ * it was not already enabled.
+ * The driver must make sure to re-enable dynamic PS using
+ * ieee80211_enable_dyn_ps() if the driver has disabled it.
+ *
+ */
+void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_enable_dyn_ps - restore dynamic psm after being disabled
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * This function restores dynamic PS after being temporarily disabled via
+ * ieee80211_disable_dyn_ps(). Each ieee80211_disable_dyn_ps() call must
+ * be coupled with an eventual call to this function.
+ *
+ */
+void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif);
+
+/**
* ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
* rssi threshold triggered
*
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index eb21340a573b..242879b6c4df 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -151,7 +151,7 @@ struct neigh_table {
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
- /* HACK. gc_* shoul follow parms without a gap! */
+ /* HACK. gc_* should follow parms without a gap! */
int gc_interval;
int gc_thresh1;
int gc_thresh2;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 4fc05b58503e..f3b201d335b3 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -35,7 +35,7 @@
* nlmsg_new() create a new netlink message
* nlmsg_put() add a netlink message to an skb
* nlmsg_put_answer() callback based nlmsg_put()
- * nlmsg_end() finanlize netlink message
+ * nlmsg_end() finalize netlink message
* nlmsg_get_pos() return current position in message
* nlmsg_trim() trim part of message
* nlmsg_cancel() cancel message construction
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index b35301b0c7b6..977ec06ed0c7 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -594,9 +594,16 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
}
#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
+static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
+ int action)
{
- struct sk_buff *n = skb_clone(skb, gfp_mask);
+ struct sk_buff *n;
+
+ if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
+ !skb_shared(skb))
+ n = skb_get(skb);
+ else
+ n = skb_clone(skb, gfp_mask);
if (n) {
n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
diff --git a/include/net/scm.h b/include/net/scm.h
index 8360e47aa7e3..31656506d967 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -19,8 +19,10 @@ struct scm_fp_list {
};
struct scm_cookie {
- struct ucred creds; /* Skb credentials */
+ struct pid *pid; /* Skb credentials */
+ const struct cred *cred;
struct scm_fp_list *fp; /* Passed files */
+ struct ucred creds; /* Skb credentials */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Passed security ID */
#endif
@@ -42,8 +44,27 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co
{ }
#endif /* CONFIG_SECURITY_NETWORK */
+static __inline__ void scm_set_cred(struct scm_cookie *scm,
+ struct pid *pid, const struct cred *cred)
+{
+ scm->pid = get_pid(pid);
+ scm->cred = get_cred(cred);
+ cred_to_ucred(pid, cred, &scm->creds);
+}
+
+static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
+{
+ put_pid(scm->pid);
+ scm->pid = NULL;
+
+ if (scm->cred)
+ put_cred(scm->cred);
+ scm->cred = NULL;
+}
+
static __inline__ void scm_destroy(struct scm_cookie *scm)
{
+ scm_destroy_cred(scm);
if (scm && scm->fp)
__scm_destroy(scm);
}
@@ -51,10 +72,7 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm)
{
- struct task_struct *p = current;
- scm->creds.uid = current_uid();
- scm->creds.gid = current_gid();
- scm->creds.pid = task_tgid_vnr(p);
+ scm_set_cred(scm, task_tgid(current), current_cred());
scm->fp = NULL;
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
@@ -96,6 +114,8 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
if (test_bit(SOCK_PASSCRED, &sock->flags))
put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
+ scm_destroy_cred(scm);
+
scm_passec(sock, msg, scm);
if (!scm->fp)
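
One plausible implementation of the cred_to_ucred() helper declared in linux/socket.h above, using the user_ns_map_uid()/user_ns_map_gid() hooks from user_namespace.h; treat it as a sketch rather than the exact in-tree code:

#include <linux/cred.h>
#include <linux/pid.h>
#include <linux/socket.h>
#include <linux/user_namespace.h>

void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);	/* pid as seen from the caller's namespace */
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *ns = current_user_ns();

		ucred->uid = user_ns_map_uid(ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(ns, cred, cred->egid);
	}
}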
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 92456f1035f5..a0e61806d480 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -47,15 +47,16 @@ struct snmp_mib {
}
/*
- * We use all unsigned longs. Linux will soon be so reliable that even
- * these will rapidly get too small 8-). Seriously consider the IpInReceives
- * count on the 20Gb/s + networks people expect in a few years time!
+ * We use unsigned longs for most mibs but u64 for ipstats.
*/
+#include <linux/u64_stats_sync.h>
/* IPstats */
#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
struct ipstats_mib {
- unsigned long mibs[IPSTATS_MIB_MAX];
+ /* mibs[] must be first field of struct ipstats_mib */
+ u64 mibs[IPSTATS_MIB_MAX];
+ struct u64_stats_sync syncp;
};
/* ICMP */
@@ -134,7 +135,7 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS_USER(mib, field, addend) \
this_cpu_add(mib[1]->mibs[field], addend)
#define SNMP_ADD_STATS(mib, field, addend) \
- this_cpu_add(mib[0]->mibs[field], addend)
+ this_cpu_add(mib[!in_softirq()]->mibs[field], addend)
/*
* Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
* to make @ptr a non-percpu pointer.
@@ -155,4 +156,70 @@ struct linux_xfrm_mib {
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
} while (0)
+
+
+#if BITS_PER_LONG==32
+
+#define SNMP_ADD_STATS64_BH(mib, field, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[field] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ } while (0)
+#define SNMP_ADD_STATS64_USER(mib, field, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ preempt_disable(); \
+ ptr = __this_cpu_ptr((mib)[1]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[field] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ preempt_enable(); \
+ } while (0)
+#define SNMP_ADD_STATS64(mib, field, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ preempt_disable(); \
+ ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[field] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ preempt_enable(); \
+ } while (0)
+#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
+#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ preempt_disable(); \
+ ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[basefield##PKTS]++; \
+ ptr->mibs[basefield##OCTETS] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ preempt_enable(); \
+ } while (0)
+#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[basefield##PKTS]++; \
+ ptr->mibs[basefield##OCTETS] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ } while (0)
+#else
+#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field)
+#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field)
+#define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field)
+#define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field)
+#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
+#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
+#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#endif
+
#endif
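
On 32-bit SMP the reader of these mibs must sample the embedded seqcount around each fetch; this is essentially what snmp_fold_field64() (declared in net/ip.h above) has to do. The sketch below is simplified to a single per-cpu array, whereas the real mibs keep separate softirq and user copies, which is why the in-tree helper also takes the syncp offset:

#include <linux/percpu.h>
#include <net/snmp.h>

static u64 fold_ipstats_field(struct ipstats_mib __percpu *mib, int field)
{
	u64 res = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct ipstats_mib *p = per_cpu_ptr(mib, cpu);
		unsigned int start;
		u64 v;

		do {
			/* _bh flavour: the writers run in softirq context */
			start = u64_stats_fetch_begin_bh(&p->syncp);
			v = p->mibs[field];
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		res += v;
	}
	return res;
}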
diff --git a/include/net/sock.h b/include/net/sock.h
index f8acf38f092f..4f26f2f83be9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -295,7 +295,8 @@ struct sock {
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
- struct ucred sk_peercred;
+ struct pid *sk_peer_pid;
+ const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
struct sk_filter *sk_filter;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 573166484413..c2f96c2cc89c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -464,7 +464,7 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);
+extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
@@ -602,6 +602,17 @@ extern u32 __tcp_select_window(struct sock *sk);
*/
#define tcp_time_stamp ((__u32)(jiffies))
+#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission
* code. We also store the host-order sequence numbers in
@@ -620,19 +631,6 @@ struct tcp_skb_cb {
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
__u32 when; /* used to compute rtt's */
__u8 flags; /* TCP header flags. */
-
- /* NOTE: These must match up to the flags byte in a
- * real TCP header.
- */
-#define TCPCB_FLAG_FIN 0x01
-#define TCPCB_FLAG_SYN 0x02
-#define TCPCB_FLAG_RST 0x04
-#define TCPCB_FLAG_PSH 0x08
-#define TCPCB_FLAG_ACK 0x10
-#define TCPCB_FLAG_URG 0x20
-#define TCPCB_FLAG_ECE 0x40
-#define TCPCB_FLAG_CWR 0x80
-
__u8 sacked; /* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
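
The flag bits formerly private to tcp_skb_cb are now plain TCPHDR_* names that match the on-wire flags byte, and tcp_flag_byte() reads that byte (offset 13) straight from a TCP header. A tiny illustration with a made-up helper name:

#include <net/tcp.h>

static bool tcp_hdr_is_synack(struct tcphdr *th)
{
	u8 flags = tcp_flag_byte(th);

	return (flags & (TCPHDR_SYN | TCPHDR_ACK)) == (TCPHDR_SYN | TCPHDR_ACK);
}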