author     Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>  2019-11-20 05:46:06 +0530
committer  David S. Miller <davem@davemloft.net>            2019-11-20 12:05:23 -0800
commit     4ec4762d8ec6edcfe59fd806472d2b7518debe52 (patch)
tree       07289ddff8b84242baa7a42e8d16edea45abb9ce /drivers/net/ethernet/chelsio/cxgb4/sched.c
parent     Merge branch 'page_pool-API-for-numa-node-change-handling' (diff)
cxgb4: add TC-MATCHALL classifier egress offload
Add TC-MATCHALL classifier offload with TC-POLICE action applied for
all outgoing traffic on the underlying interface. Split flow block
offload to support both egress and ingress classification.

For example, to rate limit all outgoing traffic to 1 Gbps:

$ tc qdisc add dev enp2s0f4 clsact
$ tc filter add dev enp2s0f4 egress matchall skip_sw \
      action police rate 1Gbit burst 8Kbit

Note that skip_sw is important. Otherwise, both stack and hardware
will end up doing policing.

Policing can't be shared across flow blocks. Only 1 egress matchall
rule can be active at a time on the underlying interface.

v5:
- No change.

v4:
- Removed check to reject police offload if prio is not 1.
- Moved TC_SETUP_BLOCK code to separate function.

v3:
- Added check to reject police offload if prio is not 1.
- Assign block_shared variable only for TC_SETUP_BLOCK.

v2:
- Added check to reject flow block sharing for policers.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
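The egress/ingress split of flow block offload mentioned above turns on the binder type the stack passes to the driver's TC_SETUP_BLOCK handler. The fragment below is only a rough sketch of that dispatch, not the cxgb4 code added by this patch: the example_* names and the stub callback bodies are invented for illustration. It binds one flow block callback for clsact ingress blocks and another for clsact egress blocks, which is where an egress matchall rule with a police action would be delivered.

#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

/* Driver-private list required by flow_block_cb_setup_simple(). */
static LIST_HEAD(example_block_cb_list);

/* Hypothetical per-direction classifier handlers; stubs only. */
static int example_ingress_cb(enum tc_setup_type type, void *type_data,
			      void *cb_priv)
{
	/* e.g. dispatch TC_SETUP_CLSFLOWER / ingress matchall here */
	return -EOPNOTSUPP;
}

static int example_egress_cb(enum tc_setup_type type, void *type_data,
			     void *cb_priv)
{
	/* e.g. handle TC_SETUP_CLSMATCHALL with a police action here */
	return -EOPNOTSUPP;
}

/* Called from ndo_setup_tc() for TC_SETUP_BLOCK: bind a different
 * callback depending on whether the block is attached to the clsact
 * ingress or egress hook.
 */
static int example_setup_tc_block(struct net_device *dev,
				  struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return flow_block_cb_setup_simple(f, &example_block_cb_list,
						  example_ingress_cb,
						  dev, dev, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return flow_block_cb_setup_simple(f, &example_block_cb_list,
						  example_egress_cb,
						  dev, dev, false);
	default:
		return -EOPNOTSUPP;
	}
}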
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/sched.c')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 56
1 files changed, 39 insertions, 17 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 0a98c4dbb36b..3e61bd5d0c29 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -50,6 +50,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
e = &s->tab[p->u.params.class];
switch (op) {
case SCHED_FW_OP_ADD:
+ case SCHED_FW_OP_DEL:
err = t4_sched_params(adap, p->type,
p->u.params.level, p->u.params.mode,
p->u.params.rateunit,
@@ -188,10 +189,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list);
kvfree(qe);
- if (atomic_dec_and_test(&e->refcnt)) {
- e->state = SCHED_STATE_UNUSED;
- memset(&e->info, 0, sizeof(e->info));
- }
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
}
return err;
}
@@ -261,10 +260,8 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
e = &pi->sched_tbl->tab[fe->param.class];
list_del(&fe->list);
kvfree(fe);
- if (atomic_dec_and_test(&e->refcnt)) {
- e->state = SCHED_STATE_UNUSED;
- memset(&e->info, 0, sizeof(e->info));
- }
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
}
return err;
}
@@ -469,10 +466,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
struct sched_class *found = NULL;
struct sched_class *e, *end;
- /* Only allow tc to be shared among SCHED_FLOWC types. For
- * other types, always allocate a new tc.
- */
- if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) {
+ if (!p) {
/* Get any available unused class */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
@@ -514,7 +508,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
- struct sched_class *e;
+ struct sched_class *e = NULL;
u8 class_id;
int err;
@@ -529,10 +523,13 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
- /* See if there's an exisiting class with same
- * requested sched params
+ /* See if there's an exisiting class with same requested sched
+ * params. Classes can only be shared among FLOWC types. For
+ * other types, always request a new class.
*/
- e = t4_sched_class_lookup(pi, p);
+ if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
+ e = t4_sched_class_lookup(pi, p);
+
if (!e) {
struct ch_sched_params np;
@@ -592,10 +589,35 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
+ struct ch_sched_params p;
struct sched_class *e;
+ u32 speed;
+ int ret;
e = &s->tab[classid];
- if (!atomic_read(&e->refcnt)) {
+ if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
+ /* Port based rate limiting needs explicit reset back
+ * to max rate. But, we'll do explicit reset for all
+ * types, instead of just port based type, to be on
+ * the safer side.
+ */
+ memcpy(&p, &e->info, sizeof(p));
+ /* Always reset mode to 0. Otherwise, FLOWC mode will
+ * still be enabled even after resetting the traffic
+ * class.
+ */
+ p.u.params.mode = 0;
+ p.u.params.minrate = 0;
+ p.u.params.pktsize = 0;
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (!ret)
+ p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
+ else
+ p.u.params.maxrate = SCHED_MAX_RATE_KBPS;
+
+ t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);
+
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}