Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c            111
-rw-r--r--  net/8021q/vlan_dev.c              14
-rw-r--r--  net/appletalk/aarp.c               5
-rw-r--r--  net/bluetooth/bnep/bnep.h          1
-rw-r--r--  net/bluetooth/bnep/core.c         12
-rw-r--r--  net/bluetooth/bnep/netdev.c       33
-rw-r--r--  net/bridge/netfilter/ebtables.c    3
-rw-r--r--  net/can/af_can.c                  15
-rw-r--r--  net/can/bcm.c                     12
-rw-r--r--  net/can/raw.c                     15
-rw-r--r--  net/core/dev.c                   238
-rw-r--r--  net/dccp/ccids/Kconfig             2
-rw-r--r--  net/dccp/ccids/lib/tfrc.c          2
-rw-r--r--  net/dsa/slave.c                   51
-rw-r--r--  net/ipv4/tcp.c                     7
-rw-r--r--  net/ipv4/tcp_input.c               2
-rw-r--r--  net/ipv4/tcp_ipv4.c                2
-rw-r--r--  net/ipv6/af_inet6.c              107
-rw-r--r--  net/ipv6/route.c                   2
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c         2
-rw-r--r--  net/ipv6/tcp_ipv6.c               47
-rw-r--r--  net/phonet/pep-gprs.c             12
-rw-r--r--  net/sched/sch_sfq.c                2
-rw-r--r--  net/sched/sch_teql.c              20
-rw-r--r--  net/sctp/auth.c                    2
-rw-r--r--  net/tipc/bcast.h                   2
-rw-r--r--  net/wimax/Kconfig                 14
-rw-r--r--  net/wimax/id-table.c               8
-rw-r--r--  net/wimax/op-rfkill.c              2
-rw-r--r--  net/wireless/wext.c                4
-rw-r--r--  net/xfrm/xfrm_ipcomp.c             1
31 files changed, 457 insertions(+), 293 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
#include <linux/if_vlan.h>
#include "vlan.h"
-struct vlan_hwaccel_cb {
- struct net_device *dev;
-};
-
-static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
-{
- return (struct vlan_hwaccel_cb *)skb->cb;
-}
-
/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling)
{
- struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-
- if (skb_bond_should_drop(skb)) {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ if (skb_bond_should_drop(skb))
+ goto drop;
skb->vlan_tci = vlan_tci;
- cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+ skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+ if (!skb->dev)
+ goto drop;
return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
- struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
- struct net_device *dev = cb->dev;
+ struct net_device *dev = skb->dev;
struct net_device_stats *stats;
+ skb->dev = vlan_dev_info(dev)->real_dev;
netif_nit_deliver(skb);
- if (dev == NULL) {
- kfree_skb(skb);
- return -1;
- }
-
skb->dev = dev;
skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
skb->vlan_tci = 0;
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+
+static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+
+ if (skb_bond_should_drop(skb))
+ goto drop;
+
+ skb->vlan_tci = vlan_tci;
+ skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+ if (!skb->dev)
+ goto drop;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
+ NAPI_GRO_CB(p)->flush = 0;
+ }
+
+ return dev_gro_receive(napi, skb);
+
+drop:
+ return 2;
+}
+
+int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
+{
+ int err = NET_RX_SUCCESS;
+
+ switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 2:
+ err = NET_RX_DROP;
+ /* fall through */
+
+ case 1:
+ kfree_skb(skb);
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+{
+ struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+ int err = NET_RX_DROP;
+
+ if (!skb)
+ goto out;
+
+ err = NET_RX_SUCCESS;
+
+ switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 2:
+ err = NET_RX_DROP;
+ /* fall through */
+
+ case 1:
+ napi_reuse_skb(napi, skb);
+ break;
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(vlan_gro_frags);
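For context: a minimal sketch of how a NAPI driver would hand hardware-
accelerated VLAN frames to the vlan_gro_receive() helper added above. All
foo_* names and the adapter layout are hypothetical, for illustration only:

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_adapter *ap = container_of(napi, struct foo_adapter, napi);
	struct sk_buff *skb;
	int work_done = 0;
	u16 vlan_tci;

	while (work_done < budget && (skb = foo_next_rx_skb(ap))) {
		if (foo_rx_vlan_tag(ap, skb, &vlan_tci))
			/* hw stripped the tag: let the helper pick the
			 * vlan device and try to aggregate the frame */
			vlan_gro_receive(napi, ap->vlgrp, vlan_tci, skb);
		else
			napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}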
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int err = 0;
+
+ if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+ err = ops->ndo_neigh_setup(dev, pa);
+
+ return err;
+}
+
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
};
static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
.ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
};
void vlan_setup(struct net_device *dev)
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface)
{
struct ifreq atreq;
struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
+ const struct net_device_ops *ops = iface->dev->netdev_ops;
sa->sat_addr.s_node = iface->address.s_node;
sa->sat_addr.s_net = ntohs(iface->address.s_net);
/* We pass the Net:Node to the drivers/cards by a Device ioctl. */
- if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
- (void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
+ if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
+ ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
iface->address.s_node != sa->sat_addr.s_node)
iface->status |= ATIF_PROBE_FAIL;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session {
struct socket *sock;
struct net_device *dev;
- struct net_device_stats stats;
};
void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
struct sk_buff *nskb;
u8 type;
- s->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += skb->len;
type = *(u8 *) skb->data; skb_pull(skb, 1);
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
* may not be modified and because of the alignment requirements. */
nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
if (!nskb) {
- s->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
kfree_skb(skb);
return -ENOMEM;
}
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
kfree_skb(skb);
- s->stats.rx_packets++;
+ dev->stats.rx_packets++;
nskb->ip_summed = CHECKSUM_NONE;
nskb->protocol = eth_type_trans(nskb, dev);
netif_rx_ni(nskb);
return 0;
badframe:
- s->stats.rx_errors++;
+ dev->stats.rx_errors++;
kfree_skb(skb);
return 0;
}
@@ -448,8 +448,8 @@ send:
kfree_skb(skb);
if (len > 0) {
- s->stats.tx_bytes += len;
- s->stats.tx_packets++;
+ s->dev->stats.tx_bytes += len;
+ s->dev->stats.tx_packets++;
return 0;
}
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
-{
- struct bnep_session *s = netdev_priv(dev);
- return &s->stats;
-}
-
static void bnep_net_set_mc_list(struct net_device *dev)
{
#ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- return -EINVAL;
-}
-
#ifdef CONFIG_BT_BNEP_MC_FILTER
static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static const struct net_device_ops bnep_netdev_ops = {
+ .ndo_open = bnep_net_open,
+ .ndo_stop = bnep_net_close,
+ .ndo_start_xmit = bnep_net_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = bnep_net_set_mc_list,
+ .ndo_set_mac_address = bnep_net_set_mac_addr,
+ .ndo_tx_timeout = bnep_net_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+
+};
+
void bnep_net_setup(struct net_device *dev)
{
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
ether_setup(dev);
-
- dev->open = bnep_net_open;
- dev->stop = bnep_net_close;
- dev->hard_start_xmit = bnep_net_xmit;
- dev->get_stats = bnep_net_get_stats;
- dev->do_ioctl = bnep_net_ioctl;
- dev->set_mac_address = bnep_net_set_mac_addr;
- dev->set_multicast_list = bnep_net_set_mc_list;
+ dev->netdev_ops = &bnep_netdev_ops;
dev->watchdog_timeo = HZ * 2;
- dev->tx_timeout = bnep_net_timeout;
}
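This is the same net_device_ops conversion applied throughout the series:
the per-device function pointers (dev->open, dev->stop,
dev->hard_start_xmit, ...) move into a single shared const ops table, and
core code dispatches through dev->netdev_ops instead, as the
net/wireless/wext.c and net/sched/sch_teql.c hunks further below do. The
calling convention, sketched:

	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_do_ioctl)
		err = ops->ndo_do_ioctl(dev, ifr, cmd);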
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index fa108c46e851..8a8743d7d6e7 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -85,12 +85,13 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
static inline int ebt_dev_check(char *entry, const struct net_device *device)
{
int i = 0;
- const char *devname = device->name;
+ const char *devname;
if (*entry == '\0')
return 0;
if (!device)
return 1;
+ devname = device->name;
/* 1 is the wildcard token */
while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
i++;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
* filter for error frames (CAN_ERR_FLAG bit set in mask).
*
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing its task. When the given sk_buff is
+ * needed after the end of the callback function it must be cloned inside
+ * the callback function with skb_clone().
+ *
* Return:
* 0 on success
* -ENOMEM on missing cache mem to create subscription entry
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
- struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
- if (clone) {
- clone->sk = skb->sk;
- r->func(clone, r->data);
- r->matches++;
- }
+ r->func(skb, r->data);
+ r->matches++;
}
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
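Under the lifetime rule documented in the can_rx_register() comment above,
a callback that wants to keep the frame past its own return must clone it,
exactly as raw_rcv() is changed to do below. A minimal sketch of such a
receiver (foo_* names hypothetical):

static void foo_can_rcv(struct sk_buff *skb, void *data)
{
	struct foo_priv *priv = data;
	struct sk_buff *clone;

	/* skb is owned by af_can and only valid while this callback
	 * runs; clone it before putting it on a queue */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	skb_queue_tail(&priv->rx_queue, clone);
}

Such a callback would be hooked up with can_rx_register(dev, can_id, mask,
foo_can_rcv, priv, "foo").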
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6248ae2502c7..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -633,7 +633,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
hrtimer_cancel(&op->timer);
if (op->can_id != rxframe->can_id)
- goto rx_freeskb;
+ return;
/* save rx timestamp */
op->rx_stamp = skb->tstamp;
@@ -645,19 +645,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if (op->flags & RX_RTR_FRAME) {
/* send reply for RTR-request (placed in op->frames[0]) */
bcm_can_tx(op);
- goto rx_freeskb;
+ return;
}
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
- goto rx_freeskb_starttimer;
+ goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
bcm_rx_cmp_to_index(op, 0, rxframe);
- goto rx_freeskb_starttimer;
+ goto rx_starttimer;
}
if (op->nframes > 1) {
@@ -678,10 +678,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
}
}
-rx_freeskb_starttimer:
+rx_starttimer:
bcm_rx_starttimer(op);
-rx_freeskb:
- kfree_skb(skb);
}
/*
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
struct raw_sock *ro = raw_sk(sk);
struct sockaddr_can *addr;
- if (!ro->recv_own_msgs) {
- /* check the received tx sock reference */
- if (skb->sk == sk) {
- kfree_skb(skb);
- return;
- }
- }
+ /* check the received tx sock reference */
+ if (!ro->recv_own_msgs && skb->sk == sk)
+ return;
+
+ /* clone the given skb to be able to enqueue it into the rcv queue */
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
/*
* Put the datagram to the queue so that raw_recvmsg() can
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c09eec..b715a55cccc4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly; /* Taps */
-#ifdef CONFIG_NET_DMA
-struct net_dma {
- struct dma_client client;
- spinlock_t lock;
- cpumask_t channel_mask;
- struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
- enum dma_state state);
-
-static struct net_dma net_dma = {
- .client = {
- .event_callback = netdev_dma_event,
- },
-};
-#endif
-
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
* semaphore.
@@ -1107,6 +1088,11 @@ int dev_open(struct net_device *dev)
dev->flags |= IFF_UP;
/*
+ * Enable NET_DMA
+ */
+ dmaengine_get();
+
+ /*
* Initialize multicasting status
*/
dev_set_rx_mode(dev);
@@ -1183,6 +1169,11 @@ int dev_close(struct net_device *dev)
*/
call_netdevice_notifiers(NETDEV_DOWN, dev);
+ /*
+ * Shutdown NET_DMA
+ */
+ dmaengine_put();
+
return 0;
}
@@ -2387,7 +2378,7 @@ void napi_gro_flush(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_flush);
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
struct packet_type *ptype;
@@ -2417,11 +2408,14 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
for (p = napi->gro_list; p; p = p->next) {
count++;
- NAPI_GRO_CB(p)->same_flow =
- p->mac_len == mac_len &&
- !memcmp(skb_mac_header(p), skb_mac_header(skb),
- mac_len);
- NAPI_GRO_CB(p)->flush = 0;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ if (p->mac_len != mac_len ||
+ memcmp(skb_mac_header(p), skb_mac_header(skb),
+ mac_len))
+ NAPI_GRO_CB(p)->same_flow = 0;
}
pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2463,6 +2457,19 @@ ok:
normal:
return -1;
}
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ NAPI_GRO_CB(p)->same_flow = 1;
+ NAPI_GRO_CB(p)->flush = 0;
+ }
+
+ return dev_gro_receive(napi, skb);
+}
int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
@@ -2479,11 +2486,26 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
}
EXPORT_SYMBOL(napi_gro_receive);
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+ skb_shinfo(skb)->nr_frags = 0;
+
+ skb->len -= skb->data_len;
+ skb->truesize -= skb->data_len;
+ skb->data_len = 0;
+
+ __skb_pull(skb, skb_headlen(skb));
+ skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+ napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+ struct napi_gro_fraginfo *info)
{
struct net_device *dev = napi->dev;
struct sk_buff *skb = napi->skb;
- int err = NET_RX_DROP;
napi->skb = NULL;
@@ -2503,16 +2525,31 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
skb->len += info->len;
skb->truesize += info->len;
- if (!pskb_may_pull(skb, ETH_HLEN))
- goto reuse;
-
- err = NET_RX_SUCCESS;
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ napi_reuse_skb(napi, skb);
+ goto out;
+ }
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = info->ip_summed;
skb->csum = info->csum;
+out:
+ return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+ struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+ int err = NET_RX_DROP;
+
+ if (!skb)
+ goto out;
+
+ err = NET_RX_SUCCESS;
+
switch (__napi_gro_receive(napi, skb)) {
case -1:
return netif_receive_skb(skb);
@@ -2521,17 +2558,7 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
goto out;
}
-reuse:
- skb_shinfo(skb)->nr_frags = 0;
-
- skb->len -= skb->data_len;
- skb->truesize -= skb->data_len;
- skb->data_len = 0;
-
- __skb_pull(skb, skb_headlen(skb));
- skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
-
- napi->skb = skb;
+ napi_reuse_skb(napi, skb);
out:
return err;
@@ -2718,14 +2745,7 @@ out:
* There may not be any more sk_buffs coming right now, so push
* any pending DMA copies to hardware
*/
- if (!cpus_empty(net_dma.channel_mask)) {
- int chan_idx;
- for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
- struct dma_chan *chan = net_dma.channels[chan_idx];
- if (chan)
- dma_async_memcpy_issue_pending(chan);
- }
- }
+ dma_issue_pending_all();
#endif
return;
@@ -4916,122 +4936,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes. The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
- unsigned int cpu, i, n, chan_idx;
- struct dma_chan *chan;
-
- if (cpus_empty(net_dma->channel_mask)) {
- for_each_online_cpu(cpu)
- rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
- return;
- }
-
- i = 0;
- cpu = first_cpu(cpu_online_map);
-
- for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
- chan = net_dma->channels[chan_idx];
-
- n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
- + (i < (num_online_cpus() %
- cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
- while(n) {
- per_cpu(softnet_data, cpu).net_dma = chan;
- cpu = next_cpu(cpu, cpu_online_map);
- n--;
- }
- i++;
- }
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
- enum dma_state state)
-{
- int i, found = 0, pos = -1;
- struct net_dma *net_dma =
- container_of(client, struct net_dma, client);
- enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
- spin_lock(&net_dma->lock);
- switch (state) {
- case DMA_RESOURCE_AVAILABLE:
- for (i = 0; i < nr_cpu_ids; i++)
- if (net_dma->channels[i] == chan) {
- found = 1;
- break;
- } else if (net_dma->channels[i] == NULL && pos < 0)
- pos = i;
-
- if (!found && pos >= 0) {
- ack = DMA_ACK;
- net_dma->channels[pos] = chan;
- cpu_set(pos, net_dma->channel_mask);
- net_dma_rebalance(net_dma);
- }
- break;
- case DMA_RESOURCE_REMOVED:
- for (i = 0; i < nr_cpu_ids; i++)
- if (net_dma->channels[i] == chan) {
- found = 1;
- pos = i;
- break;
- }
-
- if (found) {
- ack = DMA_ACK;
- cpu_clear(pos, net_dma->channel_mask);
- net_dma->channels[i] = NULL;
- net_dma_rebalance(net_dma);
- }
- break;
- default:
- break;
- }
- spin_unlock(&net_dma->lock);
-
- return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
- net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
- GFP_KERNEL);
- if (unlikely(!net_dma.channels)) {
- printk(KERN_NOTICE
- "netdev_dma: no memory for net_dma.channels\n");
- return -ENOMEM;
- }
- spin_lock_init(&net_dma.lock);
- dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
- dma_async_client_register(&net_dma.client);
- dma_async_client_chan_request(&net_dma.client);
- return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
/**
* netdev_increment_features - increment feature set by one
@@ -5251,8 +5155,6 @@ static int __init net_dev_init(void)
if (register_pernet_device(&default_device_ops))
goto out;
- netdev_dma_register();
-
open_softirq(NET_TX_SOFTIRQ, net_tx_action);
open_softirq(NET_RX_SOFTIRQ, net_rx_action);
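The per-client channel table and rebalancing code removed above is replaced
by the consolidated dmaengine interface: a global reference taken in
dev_open(), dropped in dev_close(), and a cheap per-CPU channel lookup in
the hot path. A sketch of the pattern (the wrapper function is
illustrative; dma_skb_copy_datagram_iovec() is what the TCP receive path
actually calls):

static void foo_dma_copy(void *dst, void *src, size_t len)
{
	struct dma_chan *chan;

	dmaengine_get();			/* dev_open() takes this */

	chan = dma_find_channel(DMA_MEMCPY);	/* per-CPU, no locking */
	if (chan)
		dma_async_memcpy_buf_to_buf(chan, dst, src, len);

	dma_issue_pending_all();		/* flush queued descriptors */
	dmaengine_put();			/* dev_close() drops it */
}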
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index b28bf962edc3..4b5db44970aa 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -29,7 +29,7 @@ config IP_DCCP_CCID3
http://www.ietf.org/rfc/rfc4342.txt
The TFRC congestion control algorithms were initially described in
- RFC 5448.
+ RFC 5348.
This text was extracted from RFC 4340 (sec. 10.2),
http://www.ietf.org/rfc/rfc4340.txt
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 60c412ccfeef..4902029854d8 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -36,7 +36,7 @@ out:
return rc;
}
-void __exit tfrc_lib_exit(void)
+void tfrc_lib_exit(void)
{
tfrc_rx_packet_history_exit();
tfrc_tx_packet_history_exit();
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_sset_count = dsa_slave_get_sset_count,
};
+#ifdef CONFIG_NET_DSA_TAG_DSA
+static const struct net_device_ops dsa_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+ .ndo_start_xmit = dsa_xmit,
+ .ndo_change_rx_flags = dsa_slave_change_rx_flags,
+ .ndo_set_rx_mode = dsa_slave_set_rx_mode,
+ .ndo_set_multicast_list = dsa_slave_set_rx_mode,
+ .ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_do_ioctl = dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+static const struct net_device_ops edsa_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+ .ndo_start_xmit = edsa_xmit,
+ .ndo_change_rx_flags = dsa_slave_change_rx_flags,
+ .ndo_set_rx_mode = dsa_slave_set_rx_mode,
+ .ndo_set_multicast_list = dsa_slave_set_rx_mode,
+ .ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_do_ioctl = dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+static const struct net_device_ops trailer_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+ .ndo_start_xmit = trailer_xmit,
+ .ndo_change_rx_flags = dsa_slave_change_rx_flags,
+ .ndo_set_rx_mode = dsa_slave_set_rx_mode,
+ .ndo_set_multicast_list = dsa_slave_set_rx_mode,
+ .ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_do_ioctl = dsa_slave_ioctl,
+};
+#endif
/* slave device setup *******************************************************/
struct net_device *
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
slave_dev->tx_queue_len = 0;
+
switch (ds->tag_protocol) {
#ifdef CONFIG_NET_DSA_TAG_DSA
case htons(ETH_P_DSA):
- slave_dev->hard_start_xmit = dsa_xmit;
+ slave_dev->netdev_ops = &dsa_netdev_ops;
break;
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
case htons(ETH_P_EDSA):
- slave_dev->hard_start_xmit = edsa_xmit;
+ slave_dev->netdev_ops = &edsa_netdev_ops;
break;
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
case htons(ETH_P_TRAILER):
- slave_dev->hard_start_xmit = trailer_xmit;
+ slave_dev->netdev_ops = &trailer_netdev_ops;
break;
#endif
default:
BUG();
}
- slave_dev->open = dsa_slave_open;
- slave_dev->stop = dsa_slave_close;
- slave_dev->change_rx_flags = dsa_slave_change_rx_flags;
- slave_dev->set_rx_mode = dsa_slave_set_rx_mode;
- slave_dev->set_multicast_list = dsa_slave_set_rx_mode;
- slave_dev->set_mac_address = dsa_slave_set_mac_address;
- slave_dev->do_ioctl = dsa_slave_ioctl;
+
SET_NETDEV_DEV(slave_dev, parent);
slave_dev->vlan_features = master->vlan_features;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 35bcddf8a932..ce572f9dff02 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if ((available < target) &&
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
- __get_cpu_var(softnet_data).net_dma) {
+ dma_find_channel(DMA_MEMCPY)) {
preempt_enable_no_resched();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1523,7 +1523,7 @@ do_prequeue:
if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = get_softnet_dma();
+ tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan) {
tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1628,7 +1628,6 @@ skip_copy:
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
- dma_chan_put(tp->ucopy.dma_chan);
tp->ucopy.dma_chan = NULL;
}
if (tp->ucopy.pinned_list) {
@@ -2542,6 +2541,7 @@ out:
return pp;
}
+EXPORT_SYMBOL(tcp_gro_receive);
int tcp_gro_complete(struct sk_buff *skb)
{
@@ -2558,6 +2558,7 @@ int tcp_gro_complete(struct sk_buff *skb)
return 0;
}
+EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
return 0;
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = get_softnet_dma();
+ tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process:
#ifdef CONFIG_NET_DMA
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = get_softnet_dma();
+ tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan)
ret = tcp_v4_do_rcv(sk, skb);
else
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..94f74f5b0cbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
-static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
- int proto)
+static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
struct inet6_protocol *ops = NULL;
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
__skb_pull(skb, len);
}
- return ops;
+ return proto;
}
static int ipv6_gso_send_check(struct sk_buff *skb)
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
err = -EPROTONOSUPPORT;
rcu_read_lock();
- ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ ops = rcu_dereference(inet6_protos[
+ ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
if (likely(ops && ops->gso_send_check)) {
skb_reset_transport_header(skb);
err = ops->gso_send_check(skb);
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
segs = ERR_PTR(-EPROTONOSUPPORT);
rcu_read_lock();
- ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ ops = rcu_dereference(inet6_protos[
+ ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
if (likely(ops && ops->gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->gso_segment(skb, features);
@@ -777,11 +780,105 @@ out:
return segs;
}
+struct ipv6_gro_cb {
+ struct napi_gro_cb napi;
+ int proto;
+};
+
+#define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb)
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct inet6_protocol *ops;
+ struct sk_buff **pp = NULL;
+ struct sk_buff *p;
+ struct ipv6hdr *iph;
+ unsigned int nlen;
+ int flush = 1;
+ int proto;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+
+ iph = ipv6_hdr(skb);
+ __skb_pull(skb, sizeof(*iph));
+
+ flush += ntohs(iph->payload_len) != skb->len;
+
+ rcu_read_lock();
+ proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+ IPV6_GRO_CB(skb)->proto = proto;
+ ops = rcu_dereference(inet6_protos[proto]);
+ if (!ops || !ops->gro_receive)
+ goto out_unlock;
+
+ flush--;
+ skb_reset_transport_header(skb);
+ nlen = skb_network_header_len(skb);
+
+ for (p = *head; p; p = p->next) {
+ struct ipv6hdr *iph2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ iph2 = ipv6_hdr(p);
+
+ /* All fields must match except length. */
+ if (nlen != skb_network_header_len(p) ||
+ memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
+ memcmp(&iph->nexthdr, &iph2->nexthdr,
+ nlen - offsetof(struct ipv6hdr, nexthdr))) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ NAPI_GRO_CB(p)->flush |= flush;
+ }
+
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ pp = ops->gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+ struct inet6_protocol *ops;
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ int err = -ENOSYS;
+
+ iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+ sizeof(*iph));
+
+ rcu_read_lock();
+ ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]);
+ if (WARN_ON(!ops || !ops->gro_complete))
+ goto out_unlock;
+
+ err = ops->gro_complete(skb);
+
+out_unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
static struct packet_type ipv6_packet_type = {
.type = __constant_htons(ETH_P_IPV6),
.func = ipv6_rcv,
.gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = ipv6_gro_complete,
};
static int __init ipv6_packet_init(void)
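The two memcmp() windows in ipv6_gro_receive() above deliberately skip
payload_len, the only IPv6 header field allowed to differ between packets
of the same flow. Spelled out against the fixed ipv6hdr layout:

	/* struct ipv6hdr byte offsets (RFC 2460):
	 *    0..3    version / traffic class / flow label
	 *    4..5    payload_len     <- excluded from the comparison
	 *    6       nexthdr
	 *    7       hop_limit
	 *    8..23   saddr
	 *   24..39   daddr, then any extension headers up to nlen
	 *
	 * window 1: [0, offsetof(struct ipv6hdr, payload_len))  = bytes 0..3
	 * window 2: [offsetof(struct ipv6hdr, nexthdr), nlen)   = bytes 6..nlen-1
	 */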
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 76f06b94ab9f..c4a59824ac2c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2752,7 +2752,7 @@ int __init ip6_route_init(void)
kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ip6_dst_ops_template.kmem_cachep)
- goto out;;
+ goto out;
ret = register_pernet_subsys(&ip6_route_net_ops);
if (ret)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 9048fe7e7ea7..a031034720b4 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header;
int ipv6_sysctl_register(void)
{
- int err = -ENOMEM;;
+ int err = -ENOMEM;
ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
if (ip6_header == NULL)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..e5b85d45bee8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk)
}
}
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
struct in6_addr *saddr,
struct in6_addr *daddr,
__wsum base)
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
if (skb) {
struct tcphdr *th = tcp_hdr(skb);
- th->check = tcp_v6_check(th, skb->len,
+ th->check = tcp_v6_check(skb->len,
&treq->loc_addr, &treq->rmt_addr,
csum_partial(th, skb->len, skb->csum));
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
return 0;
}
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+ skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+
+ /* fall through */
+ case CHECKSUM_NONE:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
+
+ return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp6_gro_receive);
+
+int tcp6_gro_complete(struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+ return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp6_gro_complete);
+
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
u32 ts, struct tcp_md5sig_key *key, int rst)
{
@@ -1429,14 +1464,14 @@ out:
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_COMPLETE) {
- if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+ if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
}
}
- skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+ skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0));
@@ -1640,7 +1675,7 @@ process:
#ifdef CONFIG_NET_DMA
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = get_softnet_dma();
+ tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan)
ret = tcp_v6_do_rcv(sk, skb);
else
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = {
.err_handler = tcp_v6_err,
.gso_send_check = tcp_v6_gso_send_check,
.gso_segment = tcp_tso_segment,
+ .gro_receive = tcp6_gro_receive,
+ .gro_complete = tcp6_gro_complete,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static const struct net_device_ops gprs_netdev_ops = {
+ .ndo_open = gprs_open,
+ .ndo_stop = gprs_close,
+ .ndo_start_xmit = gprs_xmit,
+ .ndo_change_mtu = gprs_set_mtu,
+};
+
static void gprs_setup(struct net_device *dev)
{
dev->features = NETIF_F_FRAGLIST;
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 10;
+ dev->netdev_ops = &gprs_netdev_ops;
dev->destructor = free_netdev;
- dev->open = gprs_open;
- dev->stop = gprs_close;
- dev->hard_start_xmit = gprs_xmit; /* mandatory */
- dev->change_mtu = gprs_set_mtu;
}
/*
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f3965df00559..33133d27b539 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
int i;
q->perturb_timer.function = sfq_perturbation;
- q->perturb_timer.data = (unsigned long)sch;;
+ q->perturb_timer.data = (unsigned long)sch;
init_timer_deferrable(&q->perturb_timer);
for (i = 0; i < SFQ_HASH_DIVISOR; i++)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart:
do {
struct net_device *slave = qdisc_dev(q);
- struct netdev_queue *slave_txq;
+ struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
+ const struct net_device_ops *slave_ops = slave->netdev_ops;
- slave_txq = netdev_get_tx_queue(slave, 0);
if (slave_txq->qdisc_sleeping != q)
continue;
if (__netif_subqueue_stopped(slave, subq) ||
@@ -305,7 +305,7 @@ restart:
if (__netif_tx_trylock(slave_txq)) {
if (!netif_tx_queue_stopped(slave_txq) &&
!netif_tx_queue_frozen(slave_txq) &&
- slave->hard_start_xmit(skb, slave) == 0) {
+ slave_ops->ndo_start_xmit(skb, slave) == 0) {
__netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static const struct net_device_ops teql_netdev_ops = {
+ .ndo_open = teql_master_open,
+ .ndo_stop = teql_master_close,
+ .ndo_start_xmit = teql_master_xmit,
+ .ndo_get_stats = teql_master_stats,
+ .ndo_change_mtu = teql_master_mtu,
+};
+
static __init void teql_master_setup(struct net_device *dev)
{
struct teql_master *master = netdev_priv(dev);
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev)
ops->destroy = teql_destroy;
ops->owner = THIS_MODULE;
- dev->open = teql_master_open;
- dev->hard_start_xmit = teql_master_xmit;
- dev->stop = teql_master_close;
- dev->get_stats = teql_master_stats;
- dev->change_mtu = teql_master_mtu;
+ dev->netdev_ops = &teql_netdev_ops;
dev->type = ARPHRD_VOID;
dev->mtu = 1500;
dev->tx_queue_len = 100;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 20c576f530fa..56935bbc1496 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -489,7 +489,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
return 0;
out_err:
- /* Clean up any successfull allocations */
+ /* Clean up any successful allocations */
sctp_auth_destroy_hmacs(ep->auth_hmacs);
return -ENOMEM;
}
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5aa024b99c55..2f2d731bc1c2 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -124,7 +124,7 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff)
{
- int stop = sizeof(nm_a->map) / sizeof(u32);
+ int stop = ARRAY_SIZE(nm_a->map);
int w;
int b;
u32 map;
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
index 0bdbb6928205..18495cdcd10d 100644
--- a/net/wimax/Kconfig
+++ b/net/wimax/Kconfig
@@ -1,9 +1,23 @@
#
# WiMAX LAN device configuration
#
+# Note the ugly 'depends on' on WIMAX: it disallows building RFKILL as
+# a module if WIMAX is to be linked in. The WiMAX code is written in
+# such a way that it doesn't require an explicit dependency on RFKILL,
+# in case an embedded system wants to rip it out.
+#
+# As well, enabling the RFKILL code means we need the INPUT layer
+# support to inject events coming from hw rfkill switches. That
+# dependency could be killed if input.h provided appropriate means to
+# work when input is disabled.
+
+comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
+ depends on INPUT = n && RFKILL != n
menuconfig WIMAX
tristate "WiMAX Wireless Broadband support"
+ depends on (y && RFKILL != m) || m
+ depends on (INPUT && RFKILL != n) || RFKILL = n
help
Select to configure support for devices that provide
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index d3b88558682c..5e685f7eda90 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -123,15 +123,17 @@ void wimax_id_table_rm(struct wimax_dev *wimax_dev)
/*
* Release the gennetlink family id / mapping table
*
- * On debug, verify that the table is empty upon removal.
+ * On debug, verify that the table is empty upon removal. We want the
+ * code always compiled, to ensure it doesn't bit rot. It will be
+ * compiled out if CONFIG_BUG is disabled.
*/
void wimax_id_table_release(void)
{
+ struct wimax_dev *wimax_dev;
+
#ifndef CONFIG_BUG
return;
#endif
- struct wimax_dev *wimax_dev;
-
spin_lock(&wimax_id_table_lock);
list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 8745bac173f1..2b75aee04217 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -71,7 +71,7 @@
#define D_SUBMODULE op_rfkill
#include "debug-levels.h"
-#ifdef CONFIG_RFKILL
+#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1ef1e4..cb6a5bb85d80 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
- if (dev->do_ioctl)
- return dev->do_ioctl(dev, ifr, cmd);
+ if (dev->netdev_ops->ndo_do_ioctl)
+ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
return -EOPNOTSUPP;
}
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index c609a4b98e15..42cd18391f46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -63,7 +63,6 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
if (len > skb_tailroom(skb))
len = skb_tailroom(skb);
- skb->truesize += len;
__skb_put(skb, len);
len += plen;