path: root/net/core/dev.c
author		Eric Dumazet <eric.dumazet@gmail.com>	2010-04-11 06:56:11 +0000
committer	David S. Miller <davem@davemloft.net>	2010-04-13 03:32:42 -0700
commit		acbbc07145b919248c410e1852b953d385be5c97 (patch)
tree		bafccb1c98540a2e6758df44ef72b6c2d55a2a78 /net/core/dev.c
parent		Fix some #includes in CAN drivers (rebased for net-next-2.6) (diff)
net: uninline skb_bond_should_drop()
skb_bond_should_drop() is too big to be inlined. This patch reduces kernel text size, and its compilation time as well (shrinking include/linux/netdevice.h).

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
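Only the net/core/dev.c half of the change is shown below; the include/linux/netdevice.h side falls outside this file-limited diffstat. As a rough sketch of what uninlining typically leaves behind in the header (the exact wrapper below is an assumption, not quoted from this diff), the old inline shrinks to a declaration plus a thin forwarder into the newly exported helper:

/* Sketch of the assumed header-side counterpart in include/linux/netdevice.h:
 * the body moves to net/core/dev.c, and the header keeps only a cheap
 * NULL-master check so non-bonded devices never take the call.
 */
extern int __skb_bond_should_drop(struct sk_buff *skb,
				  struct net_device *master);

static inline int skb_bond_should_drop(struct sk_buff *skb,
				       struct net_device *master)
{
	if (master)
		return __skb_bond_should_drop(skb, master);
	return 0;
}

Keeping only the master test inline preserves the fast path for devices that are not bonding slaves, while the 49 lines added below are compiled once in dev.c instead of in every translation unit that includes netdevice.h.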
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	49
1 file changed, 49 insertions, 0 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ca4cdef74a1b..876b1112d5ba 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2646,6 +2646,55 @@ void netif_nit_deliver(struct sk_buff *skb)
 	rcu_read_unlock();
 }
 
+static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
+					      struct net_device *master)
+{
+	if (skb->pkt_type == PACKET_HOST) {
+		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
+
+		memcpy(dest, master->dev_addr, ETH_ALEN);
+	}
+}
+
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+ * ARP on active-backup slaves with arp_validate enabled.
+ */
+int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+{
+	struct net_device *dev = skb->dev;
+
+	if (master->priv_flags & IFF_MASTER_ARPMON)
+		dev->last_rx = jiffies;
+
+	if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+		/* Do address unmangle. The local destination address
+		 * will be always the one master has. Provides the right
+		 * functionality in a bridge.
+		 */
+		skb_bond_set_mac_by_master(skb, master);
+	}
+
+	if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
+		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
+		    skb->protocol == __cpu_to_be16(ETH_P_ARP))
+			return 0;
+
+		if (master->priv_flags & IFF_MASTER_ALB) {
+			if (skb->pkt_type != PACKET_BROADCAST &&
+			    skb->pkt_type != PACKET_MULTICAST)
+				return 0;
+		}
+		if (master->priv_flags & IFF_MASTER_8023AD &&
+		    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
+			return 0;
+
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(__skb_bond_should_drop);
+
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;