author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-23 23:25:14 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-23 23:25:14 -0700
commit     aca04ce5dbda50758b813b1e3d710098991cfa9a
tree       9781ea0bca2be767bc587ff4bffb7c2a9b3858a0 /include
parent     Merge tag 'platform-drivers-x86-v4.6-1' of git://git.infradead.org/users/dvhart/linux-platform-drivers-x86
parent     net: ping: make ping_v6_sendmsg static
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking bugfixes from David Miller:
 "Several bug fixes rolling in, some for changes introduced in this
  merge window, and some for problems that have existed for some time:

  1) Fix prepare_to_wait() handling in AF_VSOCK, from Claudio Imbrenda.

  2) The new DST_CACHE should be a silent config option, from Dave Jones.

  3) inet_current_timestamp() unintentionally truncates timestamps to
     16-bit, from Deepa Dinamani.

  4) Missing reference to netns in ppp, from Guillaume Nault.

  5) Free memory reference in hv_netvsc driver, from Haiyang Zhang.

  6) Missing kernel doc documentation for function arguments in various
     spots around the networking, from Luis de Bethencourt.

  7) UDP stopped receiving broadcast packets properly, due to
     overzealous multicast checks, fix from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (59 commits)
  net: ping: make ping_v6_sendmsg static
  hv_netvsc: Fix the order of num_sc_offered decrement
  net: Fix typos and whitespace.
  hv_netvsc: Fix the array sizes to be max supported channels
  hv_netvsc: Fix accessing freed memory in netvsc_change_mtu()
  ppp: take reference on channels netns
  net: Reset encap_level to avoid resetting features on inner IP headers
  net: mediatek: fix checking for NULL instead of IS_ERR() in .probe
  net: phy: at803x: Request 'reset' GPIO only for AT8030 PHY
  at803x: fix reset handling
  AF_VSOCK: Shrink the area influenced by prepare_to_wait
  Revert "vsock: Fix blocking ops call in prepare_to_wait"
  macb: fix PHY reset
  ipv4: initialize flowi4_flags before calling fib_lookup()
  fsl/fman: Workaround for Errata A-007273
  ipv4: fix broadcast packets reception
  net: hns: bug fix about the overflow of mss
  net: hns: adds limitation for debug port mtu
  net: hns: fix the bug about mtu setting
  net: hns: fixes a bug of RSS
  ...
Diffstat (limited to 'include')
 -rw-r--r--  include/linux/mISDNif.h              |   2
 -rw-r--r--  include/linux/netdevice.h            | 225
 -rw-r--r--  include/net/flow.h                   |   2
 -rw-r--r--  include/net/inet6_connection_sock.h  |   1
 -rw-r--r--  include/net/ip_tunnels.h             |  16
 -rw-r--r--  include/net/ipv6.h                   |   6
 -rw-r--r--  include/net/ping.h                   |   1
 -rw-r--r--  include/net/sctp/sctp.h              |  10
 -rw-r--r--  include/net/sctp/structs.h           |   2
 -rw-r--r--  include/net/vxlan.h                  |  16
 -rw-r--r--  include/trace/events/fib6.h          |   2
 -rw-r--r--  include/uapi/linux/ethtool.h         |   6
 -rw-r--r--  include/uapi/linux/if_link.h         |   2
 13 files changed, 159 insertions, 132 deletions
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 246a3529ecf6..ac02c54520e9 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -596,7 +596,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
}
extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
-extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
+extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *);
extern unsigned short mISDN_clock_get(void);
extern const char *mISDNDevName4ch(struct mISDNchannel *);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 009c85adae4c..cb0d5d09c2e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -81,8 +81,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
- * higher layers. Virtual network devices transmit synchronously, in this case
- * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * higher layers. Virtual network devices transmit synchronously; in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), and all
* others are propagated to higher layers.
*/
@@ -129,7 +129,7 @@ static inline bool dev_xmit_complete(int rc)
}
/*
- * Compute the worst case header length according to the protocols
+ * Compute the worst-case header length according to the protocols
* used.
*/
@@ -246,7 +246,7 @@ struct hh_cache {
unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
-/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
+/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
* Alternative is:
* dev->hard_header_len ? (dev->hard_header_len +
* (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
@@ -272,7 +272,7 @@ struct header_ops {
};
/* These flag bits are private to the generic network queueing
- * layer, they may not be explicitly referenced by any other
+ * layer; they may not be explicitly referenced by any other
* code.
*/
@@ -286,7 +286,7 @@ enum netdev_state_t {
/*
- * This structure holds at boot time configured netdevice settings. They
+ * This structure holds boot-time configured netdevice settings. They
* are then used in the device probing.
*/
struct netdev_boot_setup {
@@ -304,7 +304,7 @@ struct napi_struct {
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
- * to the per-cpu poll_list, and whoever clears that bit
+ * to the per-CPU poll_list, and whoever clears that bit
* can remove from the list right before clearing the bit.
*/
struct list_head poll_list;
@@ -350,7 +350,7 @@ typedef enum gro_result gro_result_t;
* @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
* case skb->dev was changed by rx_handler.
* @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
- * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
+ * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
*
* rx_handlers are functions called from inside __netif_receive_skb(), to do
* special processing of the skb, prior to delivery to protocol handlers.
@@ -365,19 +365,19 @@ typedef enum gro_result gro_result_t;
* Upon return, rx_handler is expected to tell __netif_receive_skb() what to
* do with the skb.
*
- * If the rx_handler consumed to skb in some way, it should return
+ * If the rx_handler consumed the skb in some way, it should return
* RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
- * the skb to be delivered in some other ways.
+ * the skb to be delivered in some other way.
*
* If the rx_handler changed skb->dev, to divert the skb to another
* net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
* new device will be called if it exists.
*
- * If the rx_handler consider the skb should be ignored, it should return
+ * If the rx_handler decides the skb should be ignored, it should return
* RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
* are registered on exact device (ptype->dev == skb->dev).
*
- * If the rx_handler didn't changed skb->dev, but want the skb to be normally
+ * If the rx_handler didn't change skb->dev, but wants the skb to be normally
* delivered, it should return RX_HANDLER_PASS.
*
* A device without a registered rx_handler will behave as if rx_handler
@@ -402,11 +402,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
}
/**
- * napi_schedule_prep - check if napi can be scheduled
- * @n: napi context
+ * napi_schedule_prep - check if NAPI can be scheduled
+ * @n: NAPI context
*
* Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable
+ * it as running. This is used as a condition variable to
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
@@ -418,7 +418,7 @@ static inline bool napi_schedule_prep(struct napi_struct *n)
/**
* napi_schedule - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Schedule NAPI poll routine to be called if it is not already
* running.
@@ -431,7 +431,7 @@ static inline void napi_schedule(struct napi_struct *n)
/**
* napi_schedule_irqoff - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Variant of napi_schedule(), assuming hard irqs are masked.
*/
@@ -455,7 +455,7 @@ void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
- * @n: napi context
+ * @n: NAPI context
*
* Mark NAPI processing as complete.
* Consider using napi_complete_done() instead.
@@ -467,32 +467,32 @@ static inline void napi_complete(struct napi_struct *n)
/**
* napi_hash_add - add a NAPI to global hashtable
- * @napi: napi context
+ * @napi: NAPI context
*
- * generate a new napi_id and store a @napi under it in napi_hash
- * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Generate a new napi_id and store a @napi under it in napi_hash.
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
* Note: This is normally automatically done from netif_napi_add(),
- * so might disappear in a future linux version.
+ * so might disappear in a future Linux version.
*/
void napi_hash_add(struct napi_struct *napi);
/**
* napi_hash_del - remove a NAPI from global table
- * @napi: napi context
+ * @napi: NAPI context
*
- * Warning: caller must observe rcu grace period
+ * Warning: caller must observe RCU grace period
* before freeing memory containing @napi, if
* this function returns true.
* Note: core networking stack automatically calls it
- * from netif_napi_del()
+ * from netif_napi_del().
* Drivers might want to call this helper to combine all
- * the needed rcu grace periods into a single one.
+ * the needed RCU grace periods into a single one.
*/
bool napi_hash_del(struct napi_struct *napi);
/**
* napi_disable - prevent NAPI from scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
@@ -501,7 +501,7 @@ void napi_disable(struct napi_struct *n);
/**
* napi_enable - enable NAPI scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Resume NAPI from being scheduled on this context.
* Must be paired with napi_disable.
@@ -516,7 +516,7 @@ static inline void napi_enable(struct napi_struct *n)
/**
* napi_synchronize - wait until NAPI is not running
- * @n: napi context
+ * @n: NAPI context
*
* Wait until NAPI is done being scheduled on this context.
* Waits till any outstanding processing completes but
@@ -559,7 +559,7 @@ enum netdev_queue_state_t {
struct netdev_queue {
/*
- * read mostly part
+ * read-mostly part
*/
struct net_device *dev;
struct Qdisc __rcu *qdisc;
@@ -571,7 +571,7 @@ struct netdev_queue {
int numa_node;
#endif
/*
- * write mostly part
+ * write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
@@ -648,11 +648,11 @@ struct rps_dev_flow_table {
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high order bits
- * of flow hash, lower part is cpu number.
+ * Each entry is a 32bit value. Upper part is the high-order bits
+ * of flow hash, lower part is CPU number.
* rps_cpu_mask is used to partition the space, depending on number of
- * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
+ * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
@@ -674,7 +674,7 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
unsigned int index = hash & table->mask;
u32 val = hash & ~rps_cpu_mask;
- /* We only give a hint, preemption can change cpu under us */
+ /* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
if (table->ents[index] != val)
@@ -807,21 +807,21 @@ struct tc_to_netdev {
* optional and can be filled with a null pointer.
*
* int (*ndo_init)(struct net_device *dev);
- * This function is called once when network device is registered.
- * The network device can use this to any late stage initializaton
- * or semantic validattion. It can fail with an error code which will
- * be propogated back to register_netdev
+ * This function is called once when a network device is registered.
+ * The network device can use this for any late stage initialization
+ * or semantic validation. It can fail with an error code which will
+ * be propagated back to register_netdev.
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when device is unregistered or when registration
* fails. It is not called if init fails.
*
* int (*ndo_open)(struct net_device *dev);
- * This function is called when network device transistions to the up
+ * This function is called when a network device transitions to the up
* state.
*
* int (*ndo_stop)(struct net_device *dev);
- * This function is called when network device transistions to the down
+ * This function is called when a network device transitions to the down
* state.
*
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
@@ -832,7 +832,7 @@ struct tc_to_netdev {
* corner cases, but the stack really does a non-trivial amount
* of useless work if you return NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
- * Required can not be NULL.
+ * Required; cannot be NULL.
*
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
@@ -842,34 +842,34 @@ struct tc_to_netdev {
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
- * Called to decide which queue to when device supports multiple
+ * Called to decide which queue to use when device supports multiple
* transmit queues.
*
* void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
* This function is called to allow device receiver to make
- * changes to configuration when multicast or promiscious is enabled.
+ * changes to configuration when multicast or promiscuous is enabled.
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
* This function is called device changes address list filtering.
* If driver handles unicast address filtering, it should set
- * IFF_UNICAST_FLT to its priv_flags.
+ * IFF_UNICAST_FLT in its priv_flags.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
- * mac address can not be changed.
+ * MAC address can not be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user request an ioctl which can't be handled by
- * the generic interface code. If not defined ioctl's return
+ * Called when a user requests an ioctl which can't be handled by
+ * the generic interface code. If not defined ioctls return
* not supported error code.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
- * is retained for legacy reason, new devices should use the bus
+ * is retained for legacy reasons; new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
@@ -878,7 +878,7 @@ struct tc_to_netdev {
* will return an error.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
- * Callback uses when the transmitter has not made any progress
+ * Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
@@ -896,11 +896,11 @@ struct tc_to_netdev {
* neither operation.
*
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If device supports VLAN filtering this function is called when a
* VLAN id is registered.
*
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If device supports VLAN filtering this function is called when a
* VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
@@ -920,7 +920,7 @@ struct tc_to_netdev {
*
* Enable or disable the VF ability to query its RSS Redirection Table and
* Hash Key. This is needed since on some devices VF share this information
- * with PF and querying it may adduce a theoretical security risk.
+ * with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
@@ -1030,20 +1030,20 @@ struct tc_to_netdev {
*
* void (*ndo_add_vxlan_port)(struct net_device *dev,
* sa_family_t sa_family, __be16 port);
- * Called by vxlan to notiy a driver about the UDP port and socket
- * address family that vxlan is listnening to. It is called only when
+ * Called by vxlan to notify a driver about the UDP port and socket
+ * address family that vxlan is listening to. It is called only when
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
* void (*ndo_add_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify a driver about the UDP port and socket
* address family that geneve is listnening to. It is called only when
* a new port starts listening. The operation is protected by the
* geneve_net->sock_lock.
*
* void (*ndo_del_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify the driver about a UDP port and socket
* address family that geneve is not listening to anymore. The operation
* is protected by the geneve_net->sock_lock.
@@ -1072,9 +1072,9 @@ struct tc_to_netdev {
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
- * struct net_device *dev
- * netdev_features_t features);
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ * struct net_device *dev
+ * netdev_features_t features);
* Called by core transmit path to determine if device is capable of
* performing offload operations on a given packet. This is to give
* the device an opportunity to implement any restrictions that cannot
@@ -1088,7 +1088,7 @@ struct tc_to_netdev {
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
* void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
+ * bool proto_down);
* This function is used to pass protocol port error state information
* to the switch driver. The switch driver can react to the proto_down
* by doing a phys down on the associated switch port.
@@ -1100,7 +1100,7 @@ struct tc_to_netdev {
* This function is used to specify the headroom that the skb must
* consider when allocation skb during packet reception. Setting
* appropriate rx headroom value allows avoiding skb head copy on
- * forward. Setting a negative value reset the rx headroom to the
+ * forward. Setting a negative value resets the rx headroom to the
* default value.
*
*/
@@ -1299,7 +1299,7 @@ struct net_device_ops {
*
* These are the &struct net_device, they are only set internally
* by drivers and used in the kernel. These flags are invisible to
- * userspace, this means that the order of these flags can change
+ * userspace; this means that the order of these flags can change
* during any kernel release.
*
* You should have a pretty good reason to be extending these flags.
@@ -1323,6 +1323,10 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
+ * underlying stacked devices
+ * @IFF_IPVLAN_MASTER: IPvlan master device
+ * @IFF_IPVLAN_SLAVE: IPvlan slave device
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
@@ -1413,10 +1417,12 @@ enum netdev_priv_flags {
*
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
- * @napi_list: List entry, that is used for polling napi devices
- * @unreg_list: List entry, that is used, when we are unregistering the
- * device, see the function unregister_netdev
- * @close_list: List entry, that is used, when we are closing the device
+ * @napi_list: List entry used for polling NAPI devices
+ * @unreg_list: List entry when we are unregistering the
+ * device; see the function unregister_netdev
+ * @close_list: List entry used when we are closing the device
+ * @ptype_all: Device-specific packet handlers for all protocols
+ * @ptype_specific: Device-specific, protocol-specific packet handlers
*
* @adj_list: Directly linked devices, like slaves for bonding
* @all_adj_list: All linked devices, *including* neighbours
@@ -1434,7 +1440,7 @@ enum netdev_priv_flags {
* @mpls_features: Mask of features inheritable by MPLS
*
* @ifindex: interface index
- * @group: The group, that the device belongs to
+ * @group: The group the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
@@ -1488,7 +1494,7 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
- * @uc_promisc: Counter, that indicates, that promiscuous mode
+ * @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
@@ -1496,9 +1502,9 @@ enum netdev_priv_flags {
* @mc: multicast mac addresses
* @dev_addrs: list of device hw addresses
* @queues_kset: Group of all Kobjects in the Tx and RX queues
- * @promiscuity: Number of times, the NIC is told to work in
- * Promiscuous mode, if it becomes 0 the NIC will
- * exit from working in Promiscuous mode
+ * @promiscuity: Number of times the NIC is told to work in
+ * promiscuous mode; if it becomes 0 the NIC will
+ * exit promiscuous mode
* @allmulti: Counter, enables or disables allmulticast mode
*
* @vlan_info: VLAN info
@@ -1544,7 +1550,7 @@ enum netdev_priv_flags {
*
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
- * the watchdog ( see dev_watchdog() )
+ * the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
* @pcpu_refcnt: Number of references to this device
@@ -1661,8 +1667,8 @@ struct net_device {
atomic_long_t rx_nohandler;
#ifdef CONFIG_WIRELESS_EXT
- const struct iw_handler_def * wireless_handlers;
- struct iw_public_data * wireless_data;
+ const struct iw_handler_def *wireless_handlers;
+ struct iw_public_data *wireless_data;
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
@@ -1715,7 +1721,7 @@ struct net_device {
unsigned int allmulti;
- /* Protocol specific pointers */
+ /* Protocol-specific pointers */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
@@ -1745,13 +1751,11 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
-
#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;
-
#endif
unsigned long gro_flush_timeout;
@@ -1843,7 +1847,7 @@ struct net_device {
struct garp_port __rcu *garp_port;
struct mrp_port __rcu *mrp_port;
- struct device dev;
+ struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;
@@ -1858,9 +1862,9 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
- u8 prio_tc_map[TC_BITMASK + 1];
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
unsigned int fcoe_ddp_xid;
@@ -1868,9 +1872,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
- struct phy_device *phydev;
- struct lock_class_key *qdisc_tx_busylock;
- bool proto_down;
+ struct phy_device *phydev;
+ struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2018,7 +2022,7 @@ static inline void *netdev_priv(const struct net_device *dev)
/* Set the sysfs device type for the network logical device to allow
* fine-grained identification of different network device types. For
- * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
+ * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
@@ -2028,22 +2032,22 @@ static inline void *netdev_priv(const struct net_device *dev)
#define NAPI_POLL_WEIGHT 64
/**
- * netif_napi_add - initialize a napi context
+ * netif_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
- * netif_napi_add() must be used to initialize a napi context prior to calling
- * *any* of the other napi related functions.
+ * netif_napi_add() must be used to initialize a NAPI context prior to calling
+ * *any* of the other NAPI-related functions.
*/
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
- * netif_tx_napi_add - initialize a napi context
+ * netif_tx_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
@@ -2061,22 +2065,22 @@ static inline void netif_tx_napi_add(struct net_device *dev,
}
/**
- * netif_napi_del - remove a napi context
- * @napi: napi context
+ * netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
*
- * netif_napi_del() removes a napi context from the network device napi list
+ * netif_napi_del() removes a NAPI context from the network device NAPI list
*/
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
+ void *frag0;
/* Length of frag0. */
unsigned int frag0_len;
/* This indicates where we are processing relative to skb->data. */
- int data_offset;
+ int data_offset;
/* This is non-zero if the packet cannot be merged with the new skb. */
u16 flush;
@@ -2099,8 +2103,8 @@ struct napi_gro_cb {
/* This is non-zero if the packet may be of the same flow. */
u8 same_flow:1;
- /* Used in udp_gro_receive */
- u8 udp_mark:1;
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
/* GRO checksum is valid */
u8 csum_valid:1;
@@ -2172,7 +2176,7 @@ struct udp_offload {
struct udp_offload_callbacks callbacks;
};
-/* often modified stats are per cpu, other are shared (netdev->stats) */
+/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64 rx_packets;
u64 rx_bytes;
@@ -2269,7 +2273,7 @@ struct netdev_notifier_changeupper_info {
struct netdev_notifier_info info; /* must be first */
struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */
- bool linking; /* is the nofication for link or unlink */
+ bool linking; /* is the notification for link or unlink */
void *upper_info; /* upper dev info */
};
@@ -2734,7 +2738,7 @@ extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
/*
- * Incoming packets are placed on per-cpu queues
+ * Incoming packets are placed on per-CPU queues
*/
struct softnet_data {
struct list_head poll_list;
@@ -2904,7 +2908,7 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their ndo_start_xmit(),
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
@@ -2918,7 +2922,7 @@ static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_que
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their TX completion path,
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
@@ -3057,7 +3061,7 @@ static inline bool netif_running(const struct net_device *dev)
}
/*
- * Routines to manage the subqueues on a device. We only need start
+ * Routines to manage the subqueues on a device. We only need start,
* stop, and a check if it's stopped. All other device management is
* done at the overall netdevice level.
* Also test the device if we're multiqueue.
@@ -3341,7 +3345,6 @@ void netif_carrier_off(struct net_device *dev);
* in a "pending" state, waiting for some external event. For "on-
* demand" interfaces, this new state identifies the situation where the
* interface is waiting for events to place it in the up state.
- *
*/
static inline void netif_dormant_on(struct net_device *dev)
{
@@ -3676,7 +3679,7 @@ void dev_uc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_uc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3692,7 +3695,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_uc_sync().
- **/
+ */
static inline void __dev_uc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
@@ -3720,7 +3723,7 @@ void dev_mc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_mc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3736,7 +3739,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_mc_sync().
- **/
+ */
static inline void __dev_mc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
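
Editor's note: most of the netdevice.h hunks above tidy up the NAPI documentation (netif_napi_add(), napi_schedule(), napi_complete_done(), napi_enable()). For orientation, here is a minimal sketch of how a driver typically strings those calls together; the mydev_* names and the private struct are hypothetical, only the NAPI calls follow the signatures visible in this header.

/* Hedged sketch of the NAPI pattern documented above (not part of this patch).
 * Assumes <linux/netdevice.h> and <linux/interrupt.h>; mydev_* is made up. */
struct mydev_priv {
	struct napi_struct napi;
	/* ... hypothetical device state ... */
};

static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev_priv *priv = container_of(napi, struct mydev_priv, napi);
	int work_done = 0;

	/* work_done = mydev_process_rx(priv, budget);   hypothetical RX loop */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);  /* declared above, returns void here */
		/* re-enable the device's RX interrupt (device specific) */
	}
	return work_done;
}

static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev_priv *priv = data;

	/* mask the RX interrupt (device specific), then defer work to mydev_poll() */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static void mydev_napi_setup(struct net_device *dev, struct mydev_priv *priv)
{
	/* Per the comment above: must run before any other NAPI call. */
	netif_napi_add(dev, &priv->napi, mydev_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
}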
diff --git a/include/net/flow.h b/include/net/flow.h
index 83969eebebf3..d47ef4bb5423 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -127,7 +127,6 @@ struct flowi6 {
#define flowi6_oif __fl_common.flowic_oif
#define flowi6_iif __fl_common.flowic_iif
#define flowi6_mark __fl_common.flowic_mark
-#define flowi6_tos __fl_common.flowic_tos
#define flowi6_scope __fl_common.flowic_scope
#define flowi6_proto __fl_common.flowic_proto
#define flowi6_flags __fl_common.flowic_flags
@@ -135,6 +134,7 @@ struct flowi6 {
#define flowi6_tun_key __fl_common.flowic_tun_key
struct in6_addr daddr;
struct in6_addr saddr;
+ /* Note: flowi6_tos is encoded in flowlabel, too. */
__be32 flowlabel;
union flowi_uli uli;
#define fl6_sport uli.ports.sport
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 064cfbe639d0..954ad6bfb56a 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
-struct in6_addr;
struct inet_bind_bucket;
struct request_sock;
struct sk_buff;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index c35dda9ec991..56050f913339 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -305,6 +305,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+static inline int iptunnel_pull_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ int err;
+
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
+ NETIF_F_GSO_SHIFT);
+ }
+
+ skb->encapsulation = 0;
+ return 0;
+}
+
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
if (pkt_len > 0) {
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f3c9857c645d..d0aeb97aec5d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -835,6 +835,12 @@ static inline u8 ip6_tclass(__be32 flowinfo)
{
return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}
+
+static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
+{
+ return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
+}
+
/*
* Prototypes exported by ipv6
*/
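
Editor's note: the new ip6_make_flowinfo() is the inverse of ip6_tclass() above — it shifts the traffic class into bits 20-27 of the flowinfo word and ORs in the 20-bit flow label. A small standalone host-order illustration, assuming IPV6_TCLASS_SHIFT == 20 and IPV6_TCLASS_MASK covering bits 20-27 (the kernel keeps these values big-endian):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tclass = 0xb8;       /* DSCP 46 (EF) in the upper six bits, ECN 0 */
	uint32_t flowlabel = 0x12345; /* 20-bit flow label */

	uint32_t flowinfo = (tclass << 20) | flowlabel;     /* 0x0b812345 */
	uint32_t back     = (flowinfo & 0x0ff00000) >> 20;  /* 0xb8 again */

	printf("flowinfo=0x%08x tclass=0x%02x\n", flowinfo, back);
	return 0;
}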
diff --git a/include/net/ping.h b/include/net/ping.h
index 5fd7cc244833..4cd90d6b5c25 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -79,7 +79,6 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
-int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
bool ping_rcv(struct sk_buff *skb);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 835aa2ed9870..65521cfdcade 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -82,6 +82,11 @@
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
+/* Round an int up to the next multiple of 4. */
+#define WORD_ROUND(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define WORD_TRUNC(s) ((s)&~3)
+
/*
* Function declarations.
*/
@@ -426,7 +431,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN);
+ frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -475,9 +480,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
(void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
pos++)
-/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
-
/* External references. */
extern struct proto sctp_prot;
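
Editor's note: WORD_ROUND()/WORD_TRUNC() simply round an integer up, respectively down, to a multiple of 4, which is why sctp_frag_point() now truncates the computed frag value. A quick standalone numeric check:

#include <assert.h>

#define WORD_ROUND(s) (((s)+3)&~3)
#define WORD_TRUNC(s) ((s)&~3)

int main(void)
{
	assert(WORD_ROUND(1453) == 1456);  /* round up to the next multiple of 4 */
	assert(WORD_TRUNC(1453) == 1452);  /* truncate to the previous multiple */
	assert(WORD_TRUNC(1452) == 1452);  /* already-aligned values are unchanged */
	return 0;
}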
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e2ac0620d4be..6df1ce7a411c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1097,7 +1097,7 @@ int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
gfp_t gfp);
int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
- __u8 addr_state, gfp_t gfp);
+ int new_size, __u8 addr_state, gfp_t gfp);
int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index a763c96ecde4..73ed2e951c02 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -271,36 +271,36 @@ static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
static inline __be32 vxlan_vni(__be32 vni_field)
{
#if defined(__BIG_ENDIAN)
- return vni_field >> 8;
+ return (__force __be32)((__force u32)vni_field >> 8);
#else
- return (vni_field & VXLAN_VNI_MASK) << 8;
+ return (__force __be32)((__force u32)(vni_field & VXLAN_VNI_MASK) << 8);
#endif
}
static inline __be32 vxlan_vni_field(__be32 vni)
{
#if defined(__BIG_ENDIAN)
- return vni << 8;
+ return (__force __be32)((__force u32)vni << 8);
#else
- return vni >> 8;
+ return (__force __be32)((__force u32)vni >> 8);
#endif
}
static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
{
#if defined(__BIG_ENDIAN)
- return tun_id;
+ return (__force __be32)tun_id;
#else
- return tun_id >> 32;
+ return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
{
#if defined(__BIG_ENDIAN)
- return (__be64)vni;
+ return (__force __be64)vni;
#else
- return (__be64)vni << 32;
+ return (__force __be64)((u64)(__force u32)vni << 32);
#endif
}
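
Editor's note: the vxlan.h changes only add __force casts so the shifts are sparse-clean; the layout itself is unchanged. On the wire the 24-bit VNI occupies the upper three bytes of the big-endian VNI field, with the low byte reserved. A standalone little-endian walkthrough of the #else branch of vxlan_vni(); the 0x00ffffffu mask is assumed to be what VXLAN_VNI_MASK amounts to when viewed as a raw little-endian word:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* VNI 0x123456 is transmitted as the bytes 12 34 56 00 (VNI + reserved). */
	const uint8_t wire[4] = { 0x12, 0x34, 0x56, 0x00 };
	uint32_t vni_field, vni;

	memcpy(&vni_field, wire, sizeof(wire));  /* 0x00563412 on a little-endian host */

	/* little-endian branch of vxlan_vni(): drop the reserved byte and
	 * shift so the result holds the VNI in network byte order again. */
	vni = (vni_field & 0x00ffffffu) << 8;

	/* In memory vni is now the bytes 00 12 34 56, i.e. big-endian 0x00123456. */
	printf("raw host value: 0x%08x\n", vni);  /* prints 0x56341200 on LE hosts */
	return 0;
}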
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 4cf6bac4686d..d60096cddb2a 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -37,7 +37,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->tb_id = tb_id;
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
- __entry->tos = flp->flowi6_tos;
+ __entry->tos = ip6_tclass(flp->flowlabel);
__entry->scope = flp->flowi6_scope;
__entry->flags = flp->flowi6_flags;
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 2835b07416b7..9222db8ccccc 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1648,9 +1648,9 @@ enum ethtool_reset_flags {
* %ETHTOOL_GLINKSETTINGS: on entry, number of words passed by user
* (>= 0); on return, if handshake in progress, negative if
* request size unsupported by kernel: absolute value indicates
- * kernel recommended size and cmd field is 0, as well as all the
- * other fields; otherwise (handshake completed), strictly
- * positive to indicate size used by kernel and cmd field is
+ * kernel expected size and all the other fields but cmd
+ * are 0; otherwise (handshake completed), strictly positive
+ * to indicate size used by kernel and cmd field stays
* %ETHTOOL_GLINKSETTINGS, all other fields populated by driver. For
* %ETHTOOL_SLINKSETTINGS: must be valid on entry, ie. a positive
* value returned previously by %ETHTOOL_GLINKSETTINGS, otherwise
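
Editor's note: the reworded %ETHTOOL_GLINKSETTINGS text describes a two-step handshake — userspace first passes zero link-mode words, the kernel answers with a negative link_mode_masks_nwords whose absolute value is the size it expects, and the request is then repeated with that size. A hedged userspace sketch of that handshake (error handling trimmed; "eth0" is only an example interface name):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_link_settings probe = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int nwords;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* Step 1: pass 0 words; the kernel replies with a negative count
	 * whose absolute value is the number of 32-bit words it expects. */
	ifr.ifr_data = (void *)&probe;
	ioctl(fd, SIOCETHTOOL, &ifr);
	if (probe.link_mode_masks_nwords >= 0)
		return 1;	/* handshake did not go as this sketch assumes */
	nwords = -probe.link_mode_masks_nwords;

	/* Step 2: repeat with the advertised size; three bitmaps follow the
	 * fixed part (supported, advertising, lp_advertising). */
	struct ethtool_link_settings *req =
		calloc(1, sizeof(*req) + 3 * nwords * sizeof(__u32));
	req->cmd = ETHTOOL_GLINKSETTINGS;
	req->link_mode_masks_nwords = nwords;
	ifr.ifr_data = (void *)req;
	ioctl(fd, SIOCETHTOOL, &ifr);

	printf("speed %u Mb/s, %d link-mode words\n", req->speed, nwords);
	free(req);
	return 0;
}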
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a62a0129d614..c488066fb53a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -153,6 +153,8 @@ enum {
IFLA_LINK_NETNSID,
IFLA_PHYS_PORT_NAME,
IFLA_PROTO_DOWN,
+ IFLA_GSO_MAX_SEGS,
+ IFLA_GSO_MAX_SIZE,
__IFLA_MAX
};