From 2ce9c93eaca6c67e3fa8828a471738a32cd66770 Mon Sep 17 00:00:00 2001
From: Manish Chopra
Date: Thu, 9 Aug 2018 11:13:51 -0700
Subject: qede: Ingress tc flower offload (drop action) support.

The main purpose of this patch is to lay down the driver's tc offload
infrastructure. With these changes, tc can offload the various supported
flow profiles (4 tuples, src-ip, dst-ip, l4 port) for the drop action.

The dropped-flows statistic is a single global counter covering all
flows offloaded with the drop action; it is exposed as the common
ethtool statistic "gft_filter_drop".

Examples -

tc qdisc add dev p4p1 ingress

tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto tcp dst_ip 192.168.40.200 action drop

tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto udp src_ip 192.168.40.100 action drop

tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto tcp src_ip 192.168.40.100 dst_ip 192.168.40.200 \
	src_port 453 dst_port 876 action drop

tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto tcp dst_port 98 action drop
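
The resulting drop counter can then be read back from the ethtool
statistics, for example (same interface name as in the examples above,
shown only as an illustrative check):

ethtool -S p4p1 | grep gft_filter_drop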
"Adding" : "Deleting", n->sw_id, tuple_buffer, n->vfid, rxq_id); } @@ -152,7 +152,10 @@ static void qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) { kfree(fltr->data); - clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap); + + if (fltr->sw_id < QEDE_RFS_MAX_FLTR) + clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap); + kfree(fltr); } @@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc) if (fw_rc) { DP_NOTICE(edev, - "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", + "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n", fw_rc, fltr->flow_id, fltr->sw_id, ntohs(fltr->tuple.src_port), ntohs(fltr->tuple.dst_port), fltr->rxq_id); @@ -1348,7 +1351,7 @@ out: } static struct qede_arfs_fltr_node * -qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) +qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location) { struct qede_arfs_fltr_node *fltr; @@ -1959,9 +1962,8 @@ unlock: return rc; } -int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie) { - struct ethtool_rx_flow_spec *fsp = &info->fs; struct qede_arfs_fltr_node *fltr = NULL; int rc = -EPERM; @@ -1970,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) goto unlock; fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), - fsp->location); + cookie); if (!fltr) goto unlock; @@ -2000,3 +2002,293 @@ unlock: __qede_unlock(edev); return count; } + +static int qede_parse_actions(struct qede_dev *edev, + struct tcf_exts *exts) +{ + int rc = -EINVAL, num_act = 0; + const struct tc_action *a; + bool is_drop = false; + LIST_HEAD(actions); + + if (!tcf_exts_has_actions(exts)) { + DP_NOTICE(edev, "No tc actions received\n"); + return rc; + } + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + num_act++; + + if (is_tcf_gact_shot(a)) + is_drop = true; + } + + if (num_act == 1 && is_drop) + return 0; + + return rc; +} + +static int +qede_tc_parse_ports(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *t) +{ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + + if ((key->src && mask->src != U16_MAX) || + (key->dst && mask->dst != U16_MAX)) { + DP_NOTICE(edev, "Do not support ports masks\n"); + return -EINVAL; + } + + t->src_port = key->src; + t->dst_port = key->dst; + } + + return 0; +} + +static int +qede_tc_parse_v6_common(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *t) +{ + struct in6_addr zero_addr, addr; + + memset(&zero_addr, 0, sizeof(addr)); + memset(&addr, 0xff, sizeof(addr)); + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_dissector_key_ipv6_addrs *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + + if ((memcmp(&key->src, &zero_addr, sizeof(addr)) && + memcmp(&mask->src, &addr, sizeof(addr))) || + (memcmp(&key->dst, &zero_addr, sizeof(addr)) && + memcmp(&mask->dst, &addr, sizeof(addr)))) { + DP_NOTICE(edev, + "Do not 
+			return -EINVAL;
+		}
+
+		memcpy(&t->src_ipv6, &key->src, sizeof(addr));
+		memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+	}
+
+	if (qede_tc_parse_ports(edev, f, t))
+		return -EINVAL;
+
+	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+}
+
+static int
+qede_tc_parse_v4_common(struct qede_dev *edev,
+			struct tc_cls_flower_offload *f,
+			struct qede_arfs_tuple *t)
+{
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		struct flow_dissector_key_ipv4_addrs *key, *mask;
+
+		key = skb_flow_dissector_target(f->dissector,
+						FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						f->key);
+		mask = skb_flow_dissector_target(f->dissector,
+						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						 f->mask);
+
+		if ((key->src && mask->src != U32_MAX) ||
+		    (key->dst && mask->dst != U32_MAX)) {
+			DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+			return -EINVAL;
+		}
+
+		t->src_ipv4 = key->src;
+		t->dst_ipv4 = key->dst;
+	}
+
+	if (qede_tc_parse_ports(edev, f, t))
+		return -EINVAL;
+
+	return qede_set_v4_tuple_to_profile(edev, t);
+}
+
+static int
+qede_tc_parse_tcp_v6(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_TCP;
+	tuple->eth_proto = htons(ETH_P_IPV6);
+
+	return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_tcp_v4(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_TCP;
+	tuple->eth_proto = htons(ETH_P_IP);
+
+	return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v6(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_UDP;
+	tuple->eth_proto = htons(ETH_P_IPV6);
+
+	return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v4(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_UDP;
+	tuple->eth_proto = htons(ETH_P_IP);
+
+	return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
+		       struct tc_cls_flower_offload *f,
+		       struct qede_arfs_tuple *tuple)
+{
+	int rc = -EINVAL;
+	u8 ip_proto = 0;
+
+	memset(tuple, 0, sizeof(*tuple));
+
+	if (f->dissector->used_keys &
+	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+		DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+			  f->dissector->used_keys);
+		return -EOPNOTSUPP;
+	}
+
+	if (proto != htons(ETH_P_IP) &&
+	    proto != htons(ETH_P_IPV6)) {
+		DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+		return -EPROTONOSUPPORT;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key;
+
+		key = skb_flow_dissector_target(f->dissector,
+						FLOW_DISSECTOR_KEY_BASIC,
+						f->key);
+		ip_proto = key->ip_proto;
+	}
+
+	if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
+		rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+	else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
+		rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
+		rc = qede_tc_parse_udp_v4(edev, f, tuple);
+	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
+		rc = qede_tc_parse_udp_v6(edev, f, tuple);
+	else
+		DP_NOTICE(edev, "Invalid tc protocol request\n");
+
+	return rc;
+}
+
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+			    struct tc_cls_flower_offload *f)
+{
+	struct qede_arfs_fltr_node *n;
+	int min_hlen, rc = -EINVAL;
+	struct qede_arfs_tuple t;
+
+	__qede_lock(edev);
+
+	if (!edev->arfs) {
+		rc = -EPERM;
+		goto unlock;
+	}
+
+	/* parse flower attribute and prepare filter */
+	if (qede_parse_flower_attr(edev, proto, f, &t))
+		goto unlock;
+
+	/* Validate profile mode and number of filters */
+	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+	    edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
+		DP_NOTICE(edev,
+			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
+		goto unlock;
+	}
+
+	/* parse tc actions and get the vf_id */
+	if (qede_parse_actions(edev, f->exts))
+		goto unlock;
+
+	if (qede_flow_find_fltr(edev, &t)) {
+		rc = -EEXIST;
+		goto unlock;
+	}
+
+	n = kzalloc(sizeof(*n), GFP_KERNEL);
+	if (!n) {
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	min_hlen = qede_flow_get_min_header_size(&t);
+
+	n->data = kzalloc(min_hlen, GFP_KERNEL);
+	if (!n->data) {
+		kfree(n);
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+	n->buf_len = min_hlen;
+	n->b_is_drop = true;
+	n->sw_id = f->cookie;
+
+	n->tuple.build_hdr(&n->tuple, n->data);
+
+	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+	if (rc)
+		goto unlock;
+
+	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+	rc = qede_poll_arfs_filter_config(edev, n);
+
+unlock:
+	__qede_unlock(edev);
+	return rc;
+}
--
cgit v1.2.3-59-g8ed1b