Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c | 49
1 file changed, 8 insertions(+), 41 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f18c35024207..269dd71b3828 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -108,35 +108,6 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
return skb;
}
-static inline int handle_dev_cpu_collision(struct sk_buff *skb,
- struct netdev_queue *dev_queue,
- struct Qdisc *q)
-{
- int ret;
-
- if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
- /*
- * Same CPU holding the lock. It may be a transient
- * configuration error, when hard_start_xmit() recurses. We
- * detect it by checking xmit owner and drop the packet when
- * deadloop is detected. Return OK to try the next skb.
- */
- kfree_skb_list(skb);
- net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
- dev_queue->dev->name);
- ret = qdisc_qlen(q);
- } else {
- /*
- * Another cpu is holding lock, requeue & delay xmits for
- * some time.
- */
- __this_cpu_inc(softnet_data.cpu_collision);
- ret = dev_requeue_skb(skb, q);
- }
-
- return ret;
-}
-
/*
* Transmit possibly several skbs, and handle the return status as
* required. Holding the __QDISC___STATE_RUNNING bit guarantees that
@@ -159,21 +130,21 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
if (validate)
skb = validate_xmit_skb_list(skb, dev);
- if (skb) {
+ if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
HARD_TX_UNLOCK(dev, txq);
+ } else {
+ spin_lock(root_lock);
+ return qdisc_qlen(q);
}
spin_lock(root_lock);
if (dev_xmit_complete(ret)) {
/* Driver sent out skb successfully or skb was consumed */
ret = qdisc_qlen(q);
- } else if (ret == NETDEV_TX_LOCKED) {
- /* Driver try lock failed */
- ret = handle_dev_cpu_collision(skb, txq, q);
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely(ret != NETDEV_TX_BUSY))
@@ -256,13 +227,12 @@ unsigned long dev_trans_start(struct net_device *dev)
if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
- res = dev->trans_start;
- for (i = 0; i < dev->num_tx_queues; i++) {
+ res = netdev_get_tx_queue(dev, 0)->trans_start;
+ for (i = 1; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
if (val && time_after(val, res))
res = val;
}
- dev->trans_start = res;
return res;
}
@@ -285,10 +255,7 @@ static void dev_watchdog(unsigned long arg)
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
- /*
- * old device drivers set dev->trans_start
- */
- trans_start = txq->trans_start ? : dev->trans_start;
+ trans_start = txq->trans_start;
if (netif_xmit_stopped(txq) &&
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
@@ -804,7 +771,7 @@ void dev_activate(struct net_device *dev)
transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
if (need_watchdog) {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_watchdog_up(dev);
}
}
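
For context on the last hunk: netif_trans_update() replaces the open-coded
"dev->trans_start = jiffies" now that the last-transmit timestamp lives per TX
queue and is read by dev_watchdog() from txq->trans_start. A minimal sketch of
what such a helper amounts to (an assumption based on the
include/linux/netdevice.h definition of this kernel series, not part of this
diff):

static inline void netif_trans_update(struct net_device *dev)
{
	/* Record the last transmit time on queue 0; dev_watchdog() above
	 * reads trans_start per queue, so there is no device-wide field
	 * to keep in sync any more.
	 */
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* skip the write if already current */
}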