Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c       3
-rw-r--r--  net/sched/act_bpf.c       2
-rw-r--r--  net/sched/act_vlan.c     11
-rw-r--r--  net/sched/cls_api.c       2
-rw-r--r--  net/sched/cls_bpf.c       3
-rw-r--r--  net/sched/cls_rsvp.h      2
-rw-r--r--  net/sched/cls_tcindex.c   2
-rw-r--r--  net/sched/ematch.c        2
-rw-r--r--  net/sched/sch_generic.c  41
-rw-r--r--  net/sched/sch_gred.c      2
-rw-r--r--  net/sched/sch_htb.c      39
-rw-r--r--  net/sched/sch_qfq.c       8
-rw-r--r--  net/sched/sch_taprio.c   88
13 files changed, 133 insertions, 72 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f6d5755d669e..d17a66aab8ee 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -381,7 +381,8 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
}
mutex_unlock(&idrinfo->lock);
- if (nla_put_u32(skb, TCA_FCNT, n_i))
+ ret = nla_put_u32(skb, TCA_FCNT, n_i);
+ if (ret)
goto nla_put_failure;
nla_nest_end(skb, nest);
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index e48e980c3b93..e409a0005717 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -43,7 +43,6 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
tcf_lastuse_update(&prog->tcf_tm);
bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
- rcu_read_lock();
filter = rcu_dereference(prog->filter);
if (at_ingress) {
__skb_push(skb, skb->mac_len);
@@ -56,7 +55,6 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
}
if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
skb_orphan(skb);
- rcu_read_unlock();
/* A BPF program may overwrite the default action opcode.
* Similarly as in cls_bpf, if filter_res == -1 we use the
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 1cac3c6fbb49..71f2015c70ca 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -70,7 +70,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
/* replace the vid */
tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
/* replace prio bits, if tcfv_push_prio specified */
- if (p->tcfv_push_prio) {
+ if (p->tcfv_push_prio_exists) {
tci &= ~VLAN_PRIO_MASK;
tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
}
@@ -121,6 +121,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, vlan_net_id);
struct nlattr *tb[TCA_VLAN_MAX + 1];
struct tcf_chain *goto_ch = NULL;
+ bool push_prio_exists = false;
struct tcf_vlan_params *p;
struct tc_vlan *parm;
struct tcf_vlan *v;
@@ -189,7 +190,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
push_proto = htons(ETH_P_8021Q);
}
- if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
+ push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
+ if (push_prio_exists)
push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
break;
case TCA_VLAN_ACT_POP_ETH:
@@ -241,6 +243,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
p->tcfv_action = action;
p->tcfv_push_vid = push_vid;
p->tcfv_push_prio = push_prio;
+ p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
p->tcfv_push_proto = push_proto;
if (action == TCA_VLAN_ACT_PUSH_ETH) {
@@ -304,8 +307,8 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
(nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
p->tcfv_push_proto) ||
- (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
- p->tcfv_push_prio))))
+ (p->tcfv_push_prio_exists &&
+ nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY, p->tcfv_push_prio))))
goto nla_put_failure;
if (p->tcfv_action == TCA_VLAN_ACT_PUSH_ETH) {
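[Editor's note] Since VLAN priority (PCP) 0 is a valid value, the act and dump paths above can no longer treat a zero tcfv_push_prio as "no priority was given"; the patch carries a separate presence flag instead. Below is a minimal sketch of that parse/dump pattern. The demo_params struct and demo_* functions are invented for illustration; nla_get_u8()/nla_put_u8() and TCA_VLAN_PUSH_VLAN_PRIORITY are the real kernel netlink helpers and attribute used by the patch.

/* Illustrative sketch only: demo_params and the demo_* helpers are
 * hypothetical; the netlink helpers and attribute are real.
 */
#include <net/netlink.h>
#include <linux/tc_act/tc_vlan.h>

struct demo_params {
        u8   push_prio;          /* meaningful only if push_prio_exists */
        bool push_prio_exists;
};

static void demo_parse_prio(struct nlattr **tb, struct demo_params *p)
{
        /* Record attribute presence separately from its value, so an
         * explicit "priority 0" is distinguishable from "not specified".
         */
        p->push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
        if (p->push_prio_exists)
                p->push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
}

static int demo_dump_prio(struct sk_buff *skb, const struct demo_params *p)
{
        /* Dump the attribute only when it was actually configured; emitting
         * it unconditionally would turn "unset" into "priority 0" on the
         * next replace of the action.
         */
        if (p->push_prio_exists &&
            nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY, p->push_prio))
                return -EMSGSIZE;
        return 0;
}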
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 279f9e2a2319..d73b5c5514a9 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1531,7 +1531,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
- const int max_reclassify_loop = 4;
+ const int max_reclassify_loop = 16;
const struct tcf_proto *first_tp;
int limit = 0;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 6e3e63db0e01..fa739efa59f4 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -85,8 +85,6 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_bpf_prog *prog;
int ret = -1;
- /* Needed here for accessing maps. */
- rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) {
int filter_res;
@@ -131,7 +129,6 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
break;
}
- rcu_read_unlock();
return ret;
}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 2e288f88ff02..27a4b6dbcf57 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -7,7 +7,7 @@
/*
Comparing to general packet classification problem,
- RSVP needs only sevaral relatively simple rules:
+ RSVP needs only several relatively simple rules:
* (dst, protocol) are always specified,
so that we are able to hash them.
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index c4007b9cd16d..5b274534264c 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -304,7 +304,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
int i, err = 0;
cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!cp->perfect)
return -ENOMEM;
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index f885bea5b452..4ce681361851 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -141,7 +141,7 @@ errout:
EXPORT_SYMBOL(tcf_em_register);
/**
- * tcf_em_unregister - unregster and extended match
+ * tcf_em_unregister - unregister and extended match
*
* @ops: ematch operations lookup table
*
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fc8b56bcabf3..d9ac60ffe927 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -52,6 +52,8 @@ static void qdisc_maybe_clear_missed(struct Qdisc *q,
*/
if (!netif_xmit_frozen_or_stopped(txq))
set_bit(__QDISC_STATE_MISSED, &q->state);
+ else
+ set_bit(__QDISC_STATE_DRAINING, &q->state);
}
/* Main transmission queue. */
@@ -164,9 +166,13 @@ static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
skb = next;
}
- if (lock)
+
+ if (lock) {
spin_unlock(lock);
- __netif_schedule(q);
+ set_bit(__QDISC_STATE_MISSED, &q->state);
+ } else {
+ __netif_schedule(q);
+ }
}
static void try_bulk_dequeue_skb(struct Qdisc *q,
@@ -409,7 +415,11 @@ void __qdisc_run(struct Qdisc *q)
while (qdisc_restart(q, &packets)) {
quota -= packets;
if (quota <= 0) {
- __netif_schedule(q);
+ if (q->flags & TCQ_F_NOLOCK)
+ set_bit(__QDISC_STATE_MISSED, &q->state);
+ else
+ __netif_schedule(q);
+
break;
}
}
@@ -540,6 +550,24 @@ void netif_carrier_off(struct net_device *dev)
}
EXPORT_SYMBOL(netif_carrier_off);
+/**
+ * netif_carrier_event - report carrier state event
+ * @dev: network device
+ *
+ * Device has detected a carrier event but the carrier state wasn't changed.
+ * Use in drivers when querying carrier state asynchronously, to avoid missing
+ * events (link flaps) if link recovers before it's queried.
+ */
+void netif_carrier_event(struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_UNINITIALIZED)
+ return;
+ atomic_inc(&dev->carrier_up_count);
+ atomic_inc(&dev->carrier_down_count);
+ linkwatch_fire_event(dev);
+}
+EXPORT_SYMBOL_GPL(netif_carrier_event);
+
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
@@ -680,13 +708,14 @@ retry:
if (likely(skb)) {
qdisc_update_stats_at_dequeue(qdisc, skb);
} else if (need_retry &&
- test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+ READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
/* Delay clearing the STATE_MISSED here to reduce
* the overhead of the second spin_trylock() in
* qdisc_run_begin() and __netif_schedule() calling
* in qdisc_run_end().
*/
clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
/* Make sure dequeuing happens after clearing
* STATE_MISSED.
@@ -696,8 +725,6 @@ retry:
need_retry = false;
goto retry;
- } else {
- WRITE_ONCE(qdisc->empty, true);
}
return skb;
@@ -898,7 +925,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
- sch->empty = true;
dev_hold(dev);
refcount_set(&sch->refcnt, 1);
@@ -1204,6 +1230,7 @@ static void dev_reset_queue(struct net_device *dev,
spin_unlock_bh(qdisc_lock(qdisc));
if (nolock) {
clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
spin_unlock_bh(&qdisc->seqlock);
}
}
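[Editor's note] The new netif_carrier_event() helper above is intended for drivers that learn about link flaps after the fact, e.g. when polling the PHY. A minimal sketch of how a hypothetical driver's link-poll work handler might use it follows; struct example_priv and example_read_link_status() are invented for illustration, while netif_carrier_ok(), netif_carrier_on(), netif_carrier_off() and netif_carrier_event() are the net core interfaces touched by this patch.

/* Illustrative sketch only: the driver pieces are hypothetical. */
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_priv {
        struct net_device *dev;
        struct work_struct link_work;
};

/* Hypothetical hardware accessor: reports the current link state and whether
 * a link-down event was latched since the previous poll.
 */
void example_read_link_status(struct example_priv *priv,
                              bool *link_up, bool *flap_latched);

static void example_link_work(struct work_struct *work)
{
        struct example_priv *priv = container_of(work, struct example_priv,
                                                 link_work);
        bool link_up, flap_latched;

        example_read_link_status(priv, &link_up, &flap_latched);

        if (link_up != netif_carrier_ok(priv->dev)) {
                /* Carrier state really changed: report it the usual way. */
                if (link_up)
                        netif_carrier_on(priv->dev);
                else
                        netif_carrier_off(priv->dev);
        } else if (link_up && flap_latched) {
                /* Link dropped and recovered between polls: carrier state is
                 * unchanged, so report the event without toggling carrier,
                 * bumping the carrier up/down counters and firing linkwatch.
                 */
                netif_carrier_event(priv->dev);
        }
}

Compared with bouncing netif_carrier_off()/netif_carrier_on() for a link that has already recovered, this keeps the queue/watchdog state untouched while still making the flap visible to userspace via the carrier counters.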
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index f4132dc25ac0..621dc6afde8f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -6,7 +6,7 @@
*
* 991129: - Bug fix with grio mode
* - a better sing. AvgQ mode with Grio(WRED)
- * - A finer grained VQ dequeue based on sugestion
+ * - A finer grained VQ dequeue based on suggestion
* from Ren Liu
* - More error checks
*
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 8827987ba903..5f7ac27a5264 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -52,7 +52,7 @@
*/
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
-#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
@@ -273,6 +273,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
/**
* htb_add_to_id_tree - adds class to the round robin list
+ * @root: the root of the tree
+ * @cl: the class to add
+ * @prio: the give prio in class
*
* Routine adds class to the list (actually tree) sorted by classid.
* Make sure that class is not already on such list for given prio.
@@ -298,6 +301,9 @@ static void htb_add_to_id_tree(struct rb_root *root,
/**
* htb_add_to_wait_tree - adds class to the event queue with delay
+ * @q: the priority event queue
+ * @cl: the class to add
+ * @delay: delay in microseconds
*
* The class is added to priority event queue to indicate that class will
* change its mode in cl->pq_key microseconds. Make sure that class is not
@@ -331,6 +337,7 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
/**
* htb_next_rb_node - finds next node in binary tree
+ * @n: the current node in binary tree
*
* When we are past last key we return NULL.
* Average complexity is 2 steps per call.
@@ -342,6 +349,9 @@ static inline void htb_next_rb_node(struct rb_node **n)
/**
* htb_add_class_to_row - add class to its row
+ * @q: the priority event queue
+ * @cl: the class to add
+ * @mask: the given priorities in class in bitmap
*
* The class is added to row at priorities marked in mask.
* It does nothing if mask == 0.
@@ -371,6 +381,9 @@ static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
/**
* htb_remove_class_from_row - removes class from its row
+ * @q: the priority event queue
+ * @cl: the class to add
+ * @mask: the given priorities in class in bitmap
*
* The class is removed from row at priorities marked in mask.
* It does nothing if mask == 0.
@@ -398,6 +411,8 @@ static inline void htb_remove_class_from_row(struct htb_sched *q,
/**
* htb_activate_prios - creates active classe's feed chain
+ * @q: the priority event queue
+ * @cl: the class to activate
*
* The class is connected to ancestors and/or appropriate rows
* for priorities it is participating on. cl->cmode must be new
@@ -433,6 +448,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
/**
* htb_deactivate_prios - remove class from feed chain
+ * @q: the priority event queue
+ * @cl: the class to deactivate
*
* cl->cmode must represent old mode (before deactivation). It does
* nothing if cl->prio_activity == 0. Class is removed from all feed
@@ -493,6 +510,8 @@ static inline s64 htb_hiwater(const struct htb_class *cl)
/**
* htb_class_mode - computes and returns current class mode
+ * @cl: the target class
+ * @diff: diff time in microseconds
*
* It computes cl's mode at time cl->t_c+diff and returns it. If mode
* is not HTB_CAN_SEND then cl->pq_key is updated to time difference
@@ -521,9 +540,12 @@ htb_class_mode(struct htb_class *cl, s64 *diff)
/**
* htb_change_class_mode - changes classe's mode
+ * @q: the priority event queue
+ * @cl: the target class
+ * @diff: diff time in microseconds
*
* This should be the only way how to change classe's mode under normal
- * cirsumstances. Routine will update feed lists linkage, change mode
+ * circumstances. Routine will update feed lists linkage, change mode
* and add class to the wait event queue if appropriate. New mode should
* be different from old one and cl->pq_key has to be valid if changing
* to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
@@ -553,6 +575,8 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
/**
* htb_activate - inserts leaf cl into appropriate active feeds
+ * @q: the priority event queue
+ * @cl: the target class
*
* Routine learns (new) priority of leaf and activates feed chain
* for the prio. It can be called on already active leaf safely.
@@ -570,6 +594,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
/**
* htb_deactivate - remove leaf cl from active feeds
+ * @q: the priority event queue
+ * @cl: the target class
*
* Make sure that leaf is active. In the other words it can't be called
* with non-active leaf. It also removes class from the drop list.
@@ -649,6 +675,10 @@ static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
/**
* htb_charge_class - charges amount "bytes" to leaf and ancestors
+ * @q: the priority event queue
+ * @cl: the class to start iterate
+ * @level: the minimum level to account
+ * @skb: the socket buffer
*
* Routine assumes that packet "bytes" long was dequeued from leaf cl
* borrowing from "level". It accounts bytes to ceil leaky bucket for
@@ -698,6 +728,9 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
/**
* htb_do_events - make mode changes to classes at the level
+ * @q: the priority event queue
+ * @level: which wait_pq in 'q->hlevel'
+ * @start: start jiffies
*
* Scans event queue for pending events and applies them. Returns time of
* next pending event (0 for no event in pq, q->now for too many events).
@@ -766,6 +799,8 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
/**
* htb_lookup_leaf - returns next leaf class in DRR order
+ * @hprio: the current one
+ * @prio: which prio in class
*
* Find leaf where current feed pointers points to.
*/
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 1db9d4a2ef5e..b692a0de1ad5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -485,11 +485,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl->qdisc != &noop_qdisc)
qdisc_hash_add(cl->qdisc, true);
- sch_tree_lock(sch);
- qdisc_class_hash_insert(&q->clhash, &cl->common);
- sch_tree_unlock(sch);
-
- qdisc_class_hash_grow(sch, &q->clhash);
set_change_agg:
sch_tree_lock(sch);
@@ -507,8 +502,11 @@ set_change_agg:
}
if (existing)
qfq_deact_rm_from_agg(q, cl);
+ else
+ qdisc_class_hash_insert(&q->clhash, &cl->common);
qfq_add_to_agg(q, new_agg, cl);
sch_tree_unlock(sch);
+ qdisc_class_hash_grow(sch, &q->clhash);
*arg = (unsigned long)cl;
return 0;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 5c91df52b8c2..66fe2b82af9a 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -114,9 +114,6 @@ static void taprio_free_sched_cb(struct rcu_head *head)
struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
struct sched_entry *entry, *n;
- if (!sched)
- return;
-
list_for_each_entry_safe(entry, n, &sched->entries, list) {
list_del(&entry->list);
kfree(entry);
@@ -438,6 +435,11 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child;
int queue;
+ if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) {
+ WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n");
+ return qdisc_drop(skb, sch, to_free);
+ }
+
queue = skb_get_queue_mapping(skb);
child = q->qdiscs[queue];
@@ -529,23 +531,7 @@ static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
- struct taprio_sched *q = qdisc_priv(sch);
- struct net_device *dev = qdisc_dev(sch);
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct Qdisc *child = q->qdiscs[i];
-
- if (unlikely(!child))
- continue;
-
- skb = child->ops->peek(child);
- if (!skb)
- continue;
-
- return skb;
- }
+ WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n");
return NULL;
}
@@ -654,27 +640,7 @@ done:
static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
- struct taprio_sched *q = qdisc_priv(sch);
- struct net_device *dev = qdisc_dev(sch);
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct Qdisc *child = q->qdiscs[i];
-
- if (unlikely(!child))
- continue;
-
- skb = child->ops->dequeue(child);
- if (unlikely(!skb))
- continue;
-
- qdisc_bstats_update(sch, skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
-
- return skb;
- }
+ WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n");
return NULL;
}
@@ -1759,6 +1725,37 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
return taprio_change(sch, opt, extack);
}
+static void taprio_attach(struct Qdisc *sch)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned int ntx;
+
+ /* Attach underlying qdisc */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ struct Qdisc *qdisc = q->qdiscs[ntx];
+ struct Qdisc *old;
+
+ if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (ntx < dev->real_num_tx_queues)
+ qdisc_hash_add(qdisc, false);
+ } else {
+ old = dev_graft_qdisc(qdisc->dev_queue, sch);
+ qdisc_refcount_inc(sch);
+ }
+ if (old)
+ qdisc_put(old);
+ }
+
+ /* access to the child qdiscs is not needed in offload mode */
+ if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+ kfree(q->qdiscs);
+ q->qdiscs = NULL;
+ }
+}
+
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
unsigned long cl)
{
@@ -1785,8 +1782,12 @@ static int taprio_graft(struct Qdisc *sch, unsigned long cl,
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- *old = q->qdiscs[cl - 1];
- q->qdiscs[cl - 1] = new;
+ if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+ *old = dev_graft_qdisc(dev_queue, new);
+ } else {
+ *old = q->qdiscs[cl - 1];
+ q->qdiscs[cl - 1] = new;
+ }
if (new)
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
@@ -2020,6 +2021,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.change = taprio_change,
.destroy = taprio_destroy,
.reset = taprio_reset,
+ .attach = taprio_attach,
.peek = taprio_peek,
.dequeue = taprio_dequeue,
.enqueue = taprio_enqueue,