author     David S. Miller <davem@davemloft.net>  2008-07-08 17:42:10 -0700
committer  David S. Miller <davem@davemloft.net>  2008-07-08 17:42:10 -0700
commit     b0e1e6462df3c5944010b3328a546d8fe5d932cd (patch)
tree       37e3f86d09d8b37deb06cf1c142baeb8246bbf97 /net/sched
parent     netdev: The ingress_lock member is no longer needed. (diff)
netdev: Move rest of qdisc state into struct netdev_queue
Now qdisc, qdisc_sleeping, and qdisc_list also live there.

Signed-off-by: David S. Miller <davem@davemloft.net>
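For orientation, here is a minimal sketch of the per-queue state this patch relies on; the field set is inferred from the usage in the hunks below and is not the full struct netdev_queue definition from include/linux/netdevice.h:

	/*
	 * Sketch only: members inferred from this patch's usage (lock,
	 * qdisc, qdisc_sleeping, qdisc_list); the real structure carries
	 * additional fields.
	 */
	struct netdev_queue {
		spinlock_t		lock;		/* protects qdisc state for this queue */
		struct Qdisc		*qdisc;		/* active qdisc */
		struct Qdisc		*qdisc_sleeping; /* restored by dev_activate() */
		struct list_head	qdisc_list;	/* all qdiscs grafted on this queue */
	};

With devices still single-queue at this point, callers reach this state through &dev->tx_queue (and &dev->rx_queue for the ingress side), as the diffs below show.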
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/cls_api.c      7
-rw-r--r--  net/sched/sch_api.c     34
-rw-r--r--  net/sched/sch_generic.c 90
-rw-r--r--  net/sched/sch_netem.c    2
-rw-r--r--  net/sched/sch_teql.c    14
5 files changed, 93 insertions(+), 54 deletions(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index e2389f161e46..b483bbea6118 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -166,7 +166,8 @@ replay:
/* Find qdisc */
if (!parent) {
- q = dev->qdisc_sleeping;
+ struct netdev_queue *dev_queue = &dev->tx_queue;
+ q = dev_queue->qdisc_sleeping;
parent = q->handle;
} else {
q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
@@ -390,6 +391,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
+ struct netdev_queue *dev_queue;
int t;
int s_t;
struct net_device *dev;
@@ -408,8 +410,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
return skb->len;
+ dev_queue = &dev->tx_queue;
if (!tcm->tcm_parent)
- q = dev->qdisc_sleeping;
+ q = dev_queue->qdisc_sleeping;
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 570cef2a9c5f..2313fa7c97be 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -185,9 +185,10 @@ EXPORT_SYMBOL(unregister_qdisc);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
+ struct netdev_queue *dev_queue = &dev->tx_queue;
struct Qdisc *q;
- list_for_each_entry(q, &dev->qdisc_list, list) {
+ list_for_each_entry(q, &dev_queue->qdisc_list, list) {
if (q->handle == handle)
return q;
}
@@ -441,6 +442,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
static struct Qdisc *
dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
{
+ struct netdev_queue *dev_queue;
struct Qdisc *oqdisc;
if (dev->flags & IFF_UP)
@@ -459,8 +461,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
}
} else {
-
- oqdisc = dev->qdisc_sleeping;
+ dev_queue = &dev->tx_queue;
+ oqdisc = dev_queue->qdisc_sleeping;
/* Prune old scheduler */
if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
@@ -469,8 +471,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
/* ... and graft new one */
if (qdisc == NULL)
qdisc = &noop_qdisc;
- dev->qdisc_sleeping = qdisc;
- dev->qdisc = &noop_qdisc;
+ dev_queue->qdisc_sleeping = qdisc;
+ dev_queue->qdisc = &noop_qdisc;
}
qdisc_unlock_tree(dev);
@@ -633,7 +635,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
}
}
qdisc_lock_tree(dev);
- list_add_tail(&sch->list, &dev->qdisc_list);
+ list_add_tail(&sch->list, &dev_queue->qdisc_list);
qdisc_unlock_tree(dev);
return sch;
@@ -740,7 +742,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
q = dev->qdisc_ingress;
}
} else {
- q = dev->qdisc_sleeping;
+ struct netdev_queue *dev_queue = &dev->tx_queue;
+ q = dev_queue->qdisc_sleeping;
}
if (!q)
return -ENOENT;
@@ -814,7 +817,8 @@ replay:
q = dev->qdisc_ingress;
}
} else {
- q = dev->qdisc_sleeping;
+ struct netdev_queue *dev_queue = &dev->tx_queue;
+ q = dev_queue->qdisc_sleeping;
}
/* It may be default qdisc, ignore it */
@@ -1015,12 +1019,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
read_lock(&dev_base_lock);
idx = 0;
for_each_netdev(&init_net, dev) {
+ struct netdev_queue *dev_queue;
if (idx < s_idx)
goto cont;
if (idx > s_idx)
s_q_idx = 0;
q_idx = 0;
- list_for_each_entry(q, &dev->qdisc_list, list) {
+ dev_queue = &dev->tx_queue;
+ list_for_each_entry(q, &dev_queue->qdisc_list, list) {
if (q_idx < s_q_idx) {
q_idx++;
continue;
@@ -1054,6 +1060,7 @@ done:
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
struct net *net = sock_net(skb->sk);
+ struct netdev_queue *dev_queue;
struct tcmsg *tcm = NLMSG_DATA(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
@@ -1091,6 +1098,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
/* Step 1. Determine qdisc handle X:0 */
+ dev_queue = &dev->tx_queue;
if (pid != TC_H_ROOT) {
u32 qid1 = TC_H_MAJ(pid);
@@ -1101,7 +1109,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
} else if (qid1) {
qid = qid1;
} else if (qid == 0)
- qid = dev->qdisc_sleeping->handle;
+ qid = dev_queue->qdisc_sleeping->handle;
/* Now qid is genuine qdisc handle consistent
both with parent and child.
@@ -1112,7 +1120,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
pid = TC_H_MAKE(qid, pid);
} else {
if (qid == 0)
- qid = dev->qdisc_sleeping->handle;
+ qid = dev_queue->qdisc_sleeping->handle;
}
/* OK. Locate qdisc */
@@ -1248,6 +1256,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
+ struct netdev_queue *dev_queue;
int t;
int s_t;
struct net_device *dev;
@@ -1266,7 +1275,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
t = 0;
- list_for_each_entry(q, &dev->qdisc_list, list) {
+ dev_queue = &dev->tx_queue;
+ list_for_each_entry(q, &dev_queue->qdisc_list, list) {
if (t < s_t || !q->ops->cl_ops ||
(tcm->tcm_parent &&
TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 804d44b00348..3223e5ba76aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -122,7 +122,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*
* __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
* device at a time. queue->lock serializes queue accesses for
- * this device AND dev->qdisc pointer itself.
+ * this device AND txq->qdisc pointer itself.
*
* netif_tx_lock serializes accesses to device driver.
*
@@ -138,7 +138,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/
static inline int qdisc_restart(struct net_device *dev)
{
- struct Qdisc *q = dev->qdisc;
+ struct netdev_queue *txq = &dev->tx_queue;
+ struct Qdisc *q = txq->qdisc;
struct sk_buff *skb;
int ret = NETDEV_TX_BUSY;
@@ -148,15 +149,15 @@ static inline int qdisc_restart(struct net_device *dev)
/* And release queue */
- spin_unlock(&q->dev_queue->lock);
+ spin_unlock(&txq->lock);
HARD_TX_LOCK(dev, smp_processor_id());
if (!netif_subqueue_stopped(dev, skb))
ret = dev_hard_start_xmit(skb, dev);
HARD_TX_UNLOCK(dev);
- spin_lock(&q->dev_queue->lock);
- q = dev->qdisc;
+ spin_lock(&txq->lock);
+ q = txq->qdisc;
switch (ret) {
case NETDEV_TX_OK:
@@ -207,9 +208,10 @@ void __qdisc_run(struct net_device *dev)
static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
+ struct netdev_queue *txq = &dev->tx_queue;
netif_tx_lock(dev);
- if (dev->qdisc != &noop_qdisc) {
+ if (txq->qdisc != &noop_qdisc) {
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
@@ -539,53 +541,63 @@ EXPORT_SYMBOL(qdisc_destroy);
void dev_activate(struct net_device *dev)
{
+ struct netdev_queue *txq = &dev->tx_queue;
+
/* No queueing discipline is attached to device;
create default one i.e. pfifo_fast for devices,
which need queueing and noqueue_qdisc for
virtual interfaces
*/
- if (dev->qdisc_sleeping == &noop_qdisc) {
+ if (txq->qdisc_sleeping == &noop_qdisc) {
struct Qdisc *qdisc;
if (dev->tx_queue_len) {
- qdisc = qdisc_create_dflt(dev, &dev->tx_queue,
+ qdisc = qdisc_create_dflt(dev, txq,
&pfifo_fast_ops,
TC_H_ROOT);
if (qdisc == NULL) {
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- list_add_tail(&qdisc->list, &dev->qdisc_list);
+ list_add_tail(&qdisc->list, &txq->qdisc_list);
} else {
qdisc = &noqueue_qdisc;
}
- dev->qdisc_sleeping = qdisc;
+ txq->qdisc_sleeping = qdisc;
}
if (!netif_carrier_ok(dev))
/* Delay activation until next carrier-on event */
return;
- spin_lock_bh(&dev->tx_queue.lock);
- rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
- if (dev->qdisc != &noqueue_qdisc) {
+ spin_lock_bh(&txq->lock);
+ rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping);
+ if (txq->qdisc != &noqueue_qdisc) {
dev->trans_start = jiffies;
dev_watchdog_up(dev);
}
- spin_unlock_bh(&dev->tx_queue.lock);
+ spin_unlock_bh(&txq->lock);
+}
+
+static void dev_deactivate_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc_default)
+{
+ struct Qdisc *qdisc = dev_queue->qdisc;
+
+ if (qdisc) {
+ dev_queue->qdisc = qdisc_default;
+ qdisc_reset(qdisc);
+ }
}
void dev_deactivate(struct net_device *dev)
{
- struct Qdisc *qdisc;
struct sk_buff *skb;
int running;
spin_lock_bh(&dev->tx_queue.lock);
- qdisc = dev->qdisc;
- dev->qdisc = &noop_qdisc;
-
- qdisc_reset(qdisc);
+ dev_deactivate_queue(dev, &dev->tx_queue, &noop_qdisc);
skb = dev->gso_skb;
dev->gso_skb = NULL;
@@ -622,32 +634,44 @@ void dev_deactivate(struct net_device *dev)
} while (WARN_ON_ONCE(running));
}
+static void dev_init_scheduler_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc)
+{
+ dev_queue->qdisc = qdisc;
+ dev_queue->qdisc_sleeping = qdisc;
+ INIT_LIST_HEAD(&dev_queue->qdisc_list);
+}
+
void dev_init_scheduler(struct net_device *dev)
{
qdisc_lock_tree(dev);
- dev->qdisc = &noop_qdisc;
- dev->qdisc_sleeping = &noop_qdisc;
- INIT_LIST_HEAD(&dev->qdisc_list);
+ dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+ dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
qdisc_unlock_tree(dev);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
-void dev_shutdown(struct net_device *dev)
+static void dev_shutdown_scheduler_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc_default)
{
- struct Qdisc *qdisc;
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+ if (qdisc) {
+ dev_queue->qdisc = qdisc_default;
+ dev_queue->qdisc_sleeping = qdisc_default;
- qdisc_lock_tree(dev);
- qdisc = dev->qdisc_sleeping;
- dev->qdisc = &noop_qdisc;
- dev->qdisc_sleeping = &noop_qdisc;
- qdisc_destroy(qdisc);
-#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
- if ((qdisc = dev->qdisc_ingress) != NULL) {
- dev->qdisc_ingress = NULL;
qdisc_destroy(qdisc);
}
-#endif
+}
+
+void dev_shutdown(struct net_device *dev)
+{
+ qdisc_lock_tree(dev);
+ dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+ dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
BUG_TRAP(!timer_pending(&dev->watchdog_timer));
qdisc_unlock_tree(dev);
}
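The remaining scheduler changes below (netem, teql) are the same mechanical substitution: wherever the old code dereferenced dev->qdisc or dev->qdisc_sleeping, the new code goes through the device's single TX queue. A minimal sketch of that pattern, using a hypothetical helper name that is not part of this patch (the patch open-codes the access at each call site):

	/* Hypothetical helper illustrating the accessor change only. */
	static struct Qdisc *dev_root_qdisc(struct net_device *dev)
	{
		struct netdev_queue *txq = &dev->tx_queue;

		return txq->qdisc;	/* old code: return dev->qdisc; */
	}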
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 71b73c528f9b..4093f1eaaf60 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* skb will be queued.
*/
if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
+ struct Qdisc *rootq = qdisc_dev(sch)->tx_queue.qdisc;
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 4f3054e8e1ab..8ac05981be20 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -107,17 +107,19 @@ static struct sk_buff *
teql_dequeue(struct Qdisc* sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
+ struct netdev_queue *dat_queue;
struct sk_buff *skb;
skb = __skb_dequeue(&dat->q);
+ dat_queue = &dat->m->dev->tx_queue;
if (skb == NULL) {
- struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
+ struct net_device *m = qdisc_dev(dat_queue->qdisc);
if (m) {
dat->m->slaves = sch;
netif_wake_queue(m);
}
}
- sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen;
+ sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb;
}
@@ -155,7 +157,7 @@ teql_destroy(struct Qdisc* sch)
if (q == master->slaves) {
master->slaves = NULL;
spin_lock_bh(&master->dev->tx_queue.lock);
- qdisc_reset(master->dev->qdisc);
+ qdisc_reset(master->dev->tx_queue.qdisc);
spin_unlock_bh(&master->dev->tx_queue.lock);
}
}
@@ -216,7 +218,7 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
- struct teql_sched_data *q = qdisc_priv(dev->qdisc);
+ struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc);
struct neighbour *mn = skb->dst->neighbour;
struct neighbour *n = q->ncache;
@@ -252,7 +254,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
static inline int teql_resolve(struct sk_buff *skb,
struct sk_buff *skb_res, struct net_device *dev)
{
- if (dev->qdisc == &noop_qdisc)
+ if (dev->tx_queue.qdisc == &noop_qdisc)
return -ENODEV;
if (dev->header_ops == NULL ||
@@ -284,7 +286,7 @@ restart:
do {
struct net_device *slave = qdisc_dev(q);
- if (slave->qdisc_sleeping != q)
+ if (slave->tx_queue.qdisc_sleeping != q)
continue;
if (netif_queue_stopped(slave) ||
__netif_subqueue_stopped(slave, subq) ||