Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	49
1 file changed, 22 insertions(+), 27 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 39c144b6ff98..69078c82963e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -346,9 +346,6 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
return false;
}
- if (ret && netif_xmit_frozen_or_stopped(txq))
- return false;
-
return true;
}
@@ -373,33 +370,24 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
*/
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
- bool more, validate, nolock = q->flags & TCQ_F_NOLOCK;
spinlock_t *root_lock = NULL;
struct netdev_queue *txq;
struct net_device *dev;
struct sk_buff *skb;
+ bool validate;
/* Dequeue packet */
- if (nolock && test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
- return false;
-
skb = dequeue_skb(q, &validate, packets);
- if (unlikely(!skb)) {
- if (nolock)
- clear_bit(__QDISC_STATE_RUNNING, &q->state);
+ if (unlikely(!skb))
return false;
- }
- if (!nolock)
+ if (!(q->flags & TCQ_F_NOLOCK))
root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
txq = skb_get_tx_queue(dev, skb);
- more = sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
- if (nolock)
- clear_bit(__QDISC_STATE_RUNNING, &q->state);
- return more;
+ return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
void __qdisc_run(struct Qdisc *q)
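With the __QDISC_STATE_RUNNING bit gone from qdisc_restart(), mutual exclusion for a lockless (TCQ_F_NOLOCK) qdisc has to be taken once per run, in qdisc_run_begin()/qdisc_run_end(). Those helpers live in include/net/sch_generic.h and are not shown in this diff; a sketch of how they would look with the seqlock introduced in the qdisc_alloc() hunk below, assuming a plain trylock replaces the old bit:

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		/* One runner at a time: losing the trylock means another
		 * CPU already owns the dequeue path, so just back off.
		 */
		if (!spin_trylock(&qdisc->seqlock))
			return false;
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	write_seqcount_begin(&qdisc->running);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK)
		spin_unlock(&qdisc->seqlock);
}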
@@ -665,7 +653,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
if (__skb_array_empty(q))
continue;
- skb = skb_array_consume_bh(q);
+ skb = __skb_array_consume(q);
}
if (likely(skb)) {
qdisc_qstats_cpu_backlog_dec(qdisc, skb);
@@ -706,7 +694,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
if (!q->ring.queue)
continue;
- while ((skb = skb_array_consume_bh(q)) != NULL)
+ while ((skb = __skb_array_consume(q)) != NULL)
kfree_skb(skb);
}
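Both pfifo_fast consume sites switch from skb_array_consume_bh() to __skb_array_consume(), the variant without the ring's consumer spinlock and BH protection. That is only safe with a single consumer; here the guarantee presumably comes from the qdisc seqlock: dequeue runs with it held via qdisc_run_begin(), and reset happens on the deactivation path, which also takes it (see the dev_deactivate_queue() hunks below). Roughly:

/* Assumed consumer-side contract behind this change (sketch):
 *
 *   skb_array_consume_bh(q) - grabs the ring's consumer lock with BH
 *                             disabled; tolerates multiple consumers.
 *   __skb_array_consume(q)  - no internal locking; the caller must make
 *                             sure only one consumer runs at a time.
 *
 * For a TCQ_F_NOLOCK pfifo_fast, both dequeue and reset are expected to
 * execute under sch->seqlock, so the unlocked variant suffices.
 */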
@@ -867,6 +855,11 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+ /* seqlock has the same scope of busylock, for NOLOCK qdisc */
+ spin_lock_init(&sch->seqlock);
+ lockdep_set_class(&sch->busylock,
+ dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+
seqcount_init(&sch->running);
lockdep_set_class(&sch->running,
dev->qdisc_running_key ?: &qdisc_running_key);
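The new seqlock is initialized right next to busylock and, per the comment, is meant to share its lockdep scope. A NOLOCK-capable qdisc thus ends up with three synchronization objects; their assumed roles (a rough summary, not part of the diff):

/* Synchronization on a NOLOCK-capable qdisc after this change:
 *
 *   sch->busylock - pre-existing; lets concurrent enqueuers back off
 *                   instead of all spinning on the root lock.
 *   sch->seqlock  - added here; held for the duration of a qdisc run,
 *                   replacing the __QDISC_STATE_RUNNING bit.
 *   sch->running  - pre-existing seqcount, still wrapped around each run
 *                   so readers can detect an in-flight transmission.
 */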
@@ -1106,6 +1099,10 @@ static void dev_deactivate_queue(struct net_device *dev,
qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
+ bool nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+ if (nolock)
+ spin_lock_bh(&qdisc->seqlock);
spin_lock_bh(qdisc_lock(qdisc));
if (!(qdisc->flags & TCQ_F_BUILTIN))
@@ -1115,6 +1112,8 @@ static void dev_deactivate_queue(struct net_device *dev,
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
+ if (nolock)
+ spin_unlock_bh(&qdisc->seqlock);
}
}
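Deactivation now takes the new seqlock, BH-disabled, outside the per-qdisc root lock, so any CPU that still owns the seqlock through the transmit path finishes its run before the qdisc is reset. Condensed, the ordering established by the two hunks above is:

	if (qdisc->flags & TCQ_F_NOLOCK)
		spin_lock_bh(&qdisc->seqlock);	/* waits out an in-flight qdisc run */
	spin_lock_bh(qdisc_lock(qdisc));

	/* mark the qdisc deactivated, then flush pending skbs */
	qdisc_reset(qdisc);

	spin_unlock_bh(qdisc_lock(qdisc));
	if (qdisc->flags & TCQ_F_NOLOCK)
		spin_unlock_bh(&qdisc->seqlock);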
@@ -1131,17 +1130,13 @@ static bool some_qdisc_is_busy(struct net_device *dev)
dev_queue = netdev_get_tx_queue(dev, i);
q = dev_queue->qdisc_sleeping;
- if (q->flags & TCQ_F_NOLOCK) {
- val = test_bit(__QDISC_STATE_SCHED, &q->state);
- } else {
- root_lock = qdisc_lock(q);
- spin_lock_bh(root_lock);
+ root_lock = qdisc_lock(q);
+ spin_lock_bh(root_lock);
- val = (qdisc_is_running(q) ||
- test_bit(__QDISC_STATE_SCHED, &q->state));
+ val = (qdisc_is_running(q) ||
+ test_bit(__QDISC_STATE_SCHED, &q->state));
- spin_unlock_bh(root_lock);
- }
+ spin_unlock_bh(root_lock);
if (val)
return true;
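The TCQ_F_NOLOCK special case in some_qdisc_is_busy() can go because qdisc_is_running() is expected to cover lockless qdiscs once the seqlock exists, and every qdisc still embeds a root lock even when the fast path bypasses it. A sketch of the helper this relies on (include/net/sch_generic.h, not part of this diff):

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		/* A held seqlock means some CPU is inside a qdisc run. */
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}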