 drivers/net/ethernet/aquantia/atlantic/Makefile                     |   1
 drivers/net/ethernet/aquantia/atlantic/aq_common.h                  |   2
 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c                 |  31
 drivers/net/ethernet/aquantia/atlantic/aq_filters.c                 | 876
 drivers/net/ethernet/aquantia/atlantic/aq_filters.h                 |  36
 drivers/net/ethernet/aquantia/atlantic/aq_hw.h                      |  29
 drivers/net/ethernet/aquantia/atlantic/aq_main.c                    |  55
 drivers/net/ethernet/aquantia/atlantic/aq_nic.c                     |   2
 drivers/net/ethernet/aquantia/atlantic/aq_nic.h                     |  21
 drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c                |   2
 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c           | 162
 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c          | 109
 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h          |  48
 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 135
 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h        |  58
 drivers/net/ethernet/hisilicon/hns3/hnae3.h                         |   7
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c                     | 289
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h                     |  17
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h              |   7
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c             |  39
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h            |   8
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c           |  39
 drivers/net/ethernet/netronome/nfp/abm/ctrl.c                       | 105
 drivers/net/ethernet/netronome/nfp/abm/main.c                       |  42
 drivers/net/ethernet/netronome/nfp/abm/main.h                       | 108
 drivers/net/ethernet/netronome/nfp/abm/qdisc.c                      | 712
 drivers/net/ethernet/ti/cpsw.c                                      |  51
 drivers/net/ethernet/ti/cpts.c                                      |  32
 drivers/net/ethernet/ti/cpts.h                                      |  38
 drivers/net/ethernet/ti/netcp_ethss.c                               |  32
 drivers/net/phy/icplus.c                                            |  23
 include/linux/netdevice.h                                           |   1
 include/net/pkt_cls.h                                               |  24
 net/sched/sch_api.c                                                 |  17
 net/sched/sch_mq.c                                                  |   9
 net/sched/sch_red.c                                                 |  18
 36 files changed, 2695 insertions(+), 490 deletions(-)
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 686f6d8c9e79..4556630ee286 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
+ aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index becb578211ed..6b6d1724676e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
-
+#include <linux/if_vlan.h>
#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 99ef1daaa4d8..a5fd71692c8b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -12,6 +12,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_filters.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -213,7 +214,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = aq_get_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int aq_ethtool_set_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = aq_add_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = aq_del_rxnfc_rule(aq_nic, cmd);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -520,6 +550,7 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
+ .set_rxnfc = aq_ethtool_set_rxnfc,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
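
(For reference, not part of the patch: the new get/set handlers above are reached through the standard SIOCETHTOOL ioctl. Below is a minimal userspace sketch, with an assumed interface name "eth0", that inserts a TCP/IPv4 rule at location 32 — the first L3L4 slot per the AQ_RX_FIRST_LOC_FL3L4 define added later in this series — steering dst-port 80 traffic to queue 1. It is equivalent to "ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 32".)

/* sketch: insert an rx-flow rule via ETHTOOL_SRXCLSRLINS (assumed "eth0") */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;	/* full-match mask */
	nfc.fs.ring_cookie = 1;			/* steer to queue 1 */
	nfc.fs.location = 32;			/* first L3L4 slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");
	return 0;
}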
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
new file mode 100644
index 000000000000..f6145e7473b1
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.c: RX filters related functions. */
+
+#include "aq_filters.h"
+
+static bool __must_check
+aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return false;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ return true;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ case IPV6_USER_FLOW:
+ switch (fsp->h_u.usr_ip6_spec.l4_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool __must_check
+aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
+ struct ethtool_rx_flow_spec *fsp2)
+{
+ if (fsp1->flow_type != fsp2->flow_type ||
+ memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
+ memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
+ memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
+ memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
+ return false;
+
+ return true;
+}
+
+static bool __must_check
+aq_rule_already_exists(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_rx_filter *rule;
+ struct hlist_node *aq_node2;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == fsp->location)
+ continue;
+ if (aq_match_filter(&rule->aq_fsp, fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: This filter is already set\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
+ fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FL3L4,
+ AQ_RX_LAST_LOC_FL3L4);
+ return -EINVAL;
+ }
+ if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (rx_fltrs->fl3l4.is_ipv6 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified location for ipv6 must be %d or %d",
+ AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fl2(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
+ fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FETHERT,
+ AQ_RX_LAST_LOC_FETHERT);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
+ fsp->m_u.ether_spec.h_proto == 0U) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: proto (ether_type) parameter must be specfied");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
+ fsp->location > AQ_RX_LAST_LOC_FVLANID) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FVLANID,
+ AQ_RX_LAST_LOC_FVLANID);
+ return -EINVAL;
+ }
+
+ if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+ aq_nic->active_vlans))) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown vlan-id specified");
+ return -EINVAL;
+ }
+
+ if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: queue number must be in range [0, %d]",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __must_check
+aq_check_filter(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
+ err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
+ } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ } else {
+ netdev_err(aq_nic->ndev,
+ "ethtool: invalid vlan mask 0x%x specified",
+ be16_to_cpu(fsp->m_ext.vlan_tci));
+ err = -EINVAL;
+ }
+ } else {
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IP_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ default:
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown flow-type specified");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static bool __must_check
+aq_rule_is_not_support(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_support = false;
+
+ if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: Please, to enable the RX flow control:\n"
+ "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
+ rule_is_not_support = true;
+ } else if (!aq_rule_is_approve(fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified flow type is not supported\n");
+ rule_is_not_support = true;
+ } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
+ (fsp->h_u.tcp_ip4_spec.tos ||
+ fsp->h_u.tcp_ip6_spec.tclass)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified tos tclass are not supported\n");
+ rule_is_not_support = true;
+ } else if (fsp->flow_type & FLOW_MAC_EXT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: MAC_EXT is not supported");
+ rule_is_not_support = true;
+ }
+
+ return rule_is_not_support;
+}
+
+static bool __must_check
+aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_correct = false;
+
+ if (!aq_nic) {
+ rule_is_not_correct = true;
+ } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified number %u rule is invalid\n",
+ fsp->location);
+ rule_is_not_correct = true;
+ } else if (aq_check_filter(aq_nic, fsp)) {
+ rule_is_not_correct = true;
+ } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified action is invalid.\n"
+ "Maximum allowable value action is %u.\n",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ rule_is_not_correct = true;
+ }
+ }
+
+ return rule_is_not_correct;
+}
+
+static int __must_check
+aq_check_rule(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+
+ if (aq_rule_is_not_correct(aq_nic, fsp))
+ err = -EINVAL;
+ else if (aq_rule_is_not_support(aq_nic, fsp))
+ err = -EOPNOTSUPP;
+ else if (aq_rule_already_exists(aq_nic, fsp))
+ err = -EEXIST;
+
+ return err;
+}
+
+static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l2 *data, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
+
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ data->queue = fsp->ring_cookie;
+ else
+ data->queue = -1;
+
+ data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
+ data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
+ == VLAN_PRIO_MASK;
+ data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static int aq_add_del_fether(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ struct aq_rx_filter_l2 data;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
+
+ if (unlikely(!aq_hw_ops->hw_filter_l2_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
+ return -EOPNOTSUPP;
+
+ if (add)
+ return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
+ else
+ return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
+}
+
+static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
+ aq_vlans[i].vlan_id == vlan) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Function rebuilds array of vlan filters so that filters with assigned
+ * queue have a precedence over just vlans on the interface.
+ */
+static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
+ unsigned long *active_vlans,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ bool vlan_busy = false;
+ int vlan = -1;
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
+ continue;
+ do {
+ vlan = find_next_bit(active_vlans,
+ VLAN_N_VID,
+ vlan + 1);
+ if (vlan == VLAN_N_VID) {
+ aq_vlans[i].enable = 0U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = 0;
+ continue;
+ }
+
+ vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
+ if (!vlan_busy) {
+ aq_vlans[i].enable = 1U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = vlan;
+ }
+ } while (vlan_busy && vlan != VLAN_N_VID);
+ }
+}
+
+static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_vlan *aq_vlans, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+ int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
+ int i;
+
+ memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
+
+ if (!add)
+ return 0;
+
+ /* remove vlan if it was in table without queue assignment */
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].vlan_id ==
+ (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
+ aq_vlans[i].enable = false;
+ }
+ }
+
+ aq_vlans[location].location = location;
+ aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_VID_MASK;
+ aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
+ aq_vlans[location].enable = 1U;
+
+ return 0;
+}
+
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+ break;
+ }
+ if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ struct ethtool_rxnfc cmd;
+
+ cmd.fs.location = rule->aq_fsp.location;
+ return aq_del_rxnfc_rule(aq_nic, &cmd);
+ }
+
+ return -ENOENT;
+}
+
+static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+
+ aq_set_data_fvlan(aq_nic,
+ aq_rx_fltr,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
+ add);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l3l4 *data, bool add)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
+ data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
+
+ if (!add) {
+ if (!data->is_ipv6)
+ rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
+ else
+ rx_fltrs->fl3l4.active_ipv6 &=
+ ~BIT((data->location) / 4);
+
+ return 0;
+ }
+
+ data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_UDP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_SCTP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ default:
+ break;
+ }
+
+ if (!data->is_ipv6) {
+ data->ip_src[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
+ data->ip_dst[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
+ } else {
+ int i;
+
+ rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ data->ip_dst[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
+ data->ip_src[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
+ }
+ data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
+ }
+ if (fsp->flow_type != IP_USER_FLOW &&
+ fsp->flow_type != IPV6_USER_FLOW) {
+ if (!data->is_ipv6) {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip4_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip4_spec.psrc);
+ } else {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip6_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip6_spec.psrc);
+ }
+ }
+ if (data->ip_src[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
+ if (data->ip_dst[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
+ if (data->p_dst)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
+ if (data->p_src)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
+ } else {
+ data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ }
+
+ return 0;
+}
+
+static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
+ const struct aq_hw_ops *aq_hw_ops,
+ struct aq_rx_filter_l3l4 *data)
+{
+ if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
+ return -EOPNOTSUPP;
+
+ return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
+}
+
+static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ struct aq_rx_filter_l3l4 data;
+
+ if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
+ aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
+ aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
+ return -EINVAL;
+
+ return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
+}
+
+static int aq_add_del_rule(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ int err = -EINVAL;
+
+ if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
+ if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_VID_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_vlan;
+ err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
+ } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_PRIO_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ }
+ } else {
+ switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_l3l4;
+ err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int aq_update_table_filters(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, u16 index,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL, *parent = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location >= index)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->aq_fsp.location == index) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+ if (unlikely(!aq_rx_fltr))
+ return err;
+
+ INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
+
+ if (parent)
+ hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
+ else
+ hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
+
+ ++rx_fltrs->active_filters;
+
+ return 0;
+}
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ return rx_fltrs->active_filters;
+}
+
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
+{
+ return &aq_nic->aq_hw_rx_fltrs;
+}
+
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *aq_rx_fltr;
+ int err = 0;
+
+ err = aq_check_rule(aq_nic, fsp);
+ if (err)
+ goto err_exit;
+
+ aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
+ if (unlikely(!aq_rx_fltr)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
+
+ err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
+ if (unlikely(err))
+ goto err_free;
+
+ err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
+ if (unlikely(err)) {
+ hlist_del(&aq_rx_fltr->aq_node);
+ --rx_fltrs->active_filters;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(aq_rx_fltr);
+err_exit:
+ return err;
+}
+
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == cmd->fs.location)
+ break;
+ }
+
+ if (rule && rule->aq_fsp.location == cmd->fs.location) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+ return err;
+}
+
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node)
+ if (fsp->location <= rule->aq_fsp.location)
+ break;
+
+ if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
+ return -EINVAL;
+
+ memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
+
+ return 0;
+}
+
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int count = 0;
+
+ cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (unlikely(count == cmd->rule_cnt))
+ return -EMSGSIZE;
+
+ rule_locs[count++] = rule->aq_fsp.location;
+ }
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ if (err)
+ goto err_exit;
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, true);
+ if (err)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int hweight = 0;
+ int err = 0;
+ int i;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
+ hweight += hweight_long(aq_nic->active_vlans[i]);
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ }
+
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ if (err)
+ return err;
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (hweight < AQ_VLAN_MAX_FILTERS)
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+ /* otherwise leave the VLAN filter in promiscuous mode */
+ }
+
+ return err;
+}
+
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int err = 0;
+
+ memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ return err;
+}
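
A note on the FLOW_EXT handling above: the driver keys the filter family off the vlan_tci mask alone — a full 12-bit VID mask selects the VLAN filter table (locations 0-15), while a full 3-bit PCP mask selects an ethertype/user-priority filter (locations 16-31). A standalone sketch of that dispatch follows; the mask values are restated from linux/if_vlan.h and the enum mirrors aq_rx_filter_type:

#include <assert.h>
#include <stdint.h>

#define VLAN_PRIO_MASK	0xe000	/* from linux/if_vlan.h */
#define VLAN_VID_MASK	0x0fff	/* from linux/if_vlan.h */

enum filter_family { FAMILY_VLAN, FAMILY_ETHERTYPE, FAMILY_INVALID };

/* mirrors the dispatch in aq_check_filter()/aq_add_del_rule() */
static enum filter_family classify(uint16_t vlan_tci_mask)
{
	if (vlan_tci_mask == VLAN_VID_MASK)
		return FAMILY_VLAN;	/* exact vlan-id match */
	if (vlan_tci_mask == VLAN_PRIO_MASK)
		return FAMILY_ETHERTYPE; /* match on 802.1p priority */
	return FAMILY_INVALID;
}

int main(void)
{
	assert(classify(0x0fff) == FAMILY_VLAN);
	assert(classify(0xe000) == FAMILY_ETHERTYPE);
	assert(classify(0xffff) == FAMILY_INVALID);
	return 0;
}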
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
new file mode 100644
index 000000000000..c6a08c6585d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.h: RX filters related functions. */
+
+#ifndef AQ_FILTERS_H
+#define AQ_FILTERS_H
+
+#include "aq_nic.h"
+
+enum aq_rx_filter_type {
+ aq_rx_filter_ethertype,
+ aq_rx_filter_vlan,
+ aq_rx_filter_l3l4
+};
+
+struct aq_rx_filter {
+ struct hlist_node aq_node;
+ enum aq_rx_filter_type type;
+ struct ethtool_rx_flow_spec aq_fsp;
+};
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
+
+#endif /* AQ_FILTERS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a1e70da358ca..81aab73dc22f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,17 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_RX_FIRST_LOC_FVLANID 0U
+#define AQ_RX_LAST_LOC_FVLANID 15U
+#define AQ_RX_FIRST_LOC_FETHERT 16U
+#define AQ_RX_LAST_LOC_FETHERT 31U
+#define AQ_RX_FIRST_LOC_FL3L4 32U
+#define AQ_RX_LAST_LOC_FL3L4 39U
+#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
+#define AQ_VLAN_MAX_FILTERS \
+ (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
+#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
@@ -130,6 +141,7 @@ struct aq_hw_s {
struct aq_ring_s;
struct aq_ring_param_s;
struct sk_buff;
+struct aq_rx_filter_l3l4;
struct aq_hw_ops {
@@ -183,6 +195,23 @@ struct aq_hw_ops {
int (*hw_packet_filter_set)(struct aq_hw_s *self,
unsigned int packet_filter);
+ int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l2_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_l2_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_vlan_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans);
+
+ int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
+
int (*hw_multicast_list_set)(struct aq_hw_s *self,
u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
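
The location defines above partition the 40 rxnfc slots into three fixed, contiguous windows (16 VLAN + 16 ethertype + 8 L3L4). A small compile-check sketch, with the values duplicated locally so it stands alone:

#include <assert.h>

#define AQ_RX_FIRST_LOC_FVLANID	 0U
#define AQ_RX_LAST_LOC_FVLANID	15U
#define AQ_RX_FIRST_LOC_FETHERT	16U
#define AQ_RX_LAST_LOC_FETHERT	31U
#define AQ_RX_FIRST_LOC_FL3L4	32U
#define AQ_RX_LAST_LOC_FL3L4	39U

int main(void)
{
	/* windows are contiguous and non-overlapping: 16 + 16 + 8 = 40 */
	assert(AQ_RX_FIRST_LOC_FETHERT == AQ_RX_LAST_LOC_FVLANID + 1);
	assert(AQ_RX_FIRST_LOC_FL3L4 == AQ_RX_LAST_LOC_FETHERT + 1);
	assert(AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FVLANID + 1 == 40);
	return 0;
}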
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 7c07eef275eb..2a11c1eefd8f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,6 +13,7 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev)
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
+
+ err = aq_reapply_rxnfc_all_rules(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
@@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev,
bool is_lro = false;
int err = 0;
+ if (!(features & NETIF_F_NTUPLE)) {
+ if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
+ err = aq_clear_rxnfc_all_rules(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+ if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ err = aq_filters_vlan_offload_off(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+
aq_cfg->features = features;
if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
@@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev,
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
+err_exit:
return err;
}
@@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
aq_nic_set_multicast_list(aq_nic, ndev);
}
+static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ set_bit(vid, aq_nic->active_vlans);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ clear_bit(vid, aq_nic->active_vlans);
+
+ if (-ENOENT == aq_del_fvlan_by_vlan(aq_nic, vid))
+ return aq_filters_vlans_update(aq_nic);
+
+ return 0;
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
- .ndo_set_features = aq_ndev_set_features
+ .ndo_set_features = aq_ndev_set_features,
+ .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
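
Both new ndo handlers funnel into aq_filters_vlans_update(), whose aq_fvlan_rebuild() step (in aq_filters.c above) refills the 16 hardware VLAN slots from the kernel's vid bitmap while leaving entries pinned by an ethtool rule untouched. A simplified userspace model of that refill — it omits the busy-vid check the kernel version performs, and the type and helper names are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define MAX_FILTERS		16	/* AQ_VLAN_MAX_FILTERS */
#define QUEUE_NOT_ASSIGNED	0xFF	/* AQ_RX_QUEUE_NOT_ASSIGNED */

struct vlan_slot {
	uint8_t enable;
	uint8_t queue;
	uint16_t vlan_id;
};

/* model of aq_fvlan_rebuild(): slots with an assigned queue keep their
 * vid; the remaining slots are refilled from the active-vid set */
static void rebuild(struct vlan_slot *s, const uint8_t *active, int nvids)
{
	int vid = -1;

	for (int i = 0; i < MAX_FILTERS; i++) {
		if (s[i].enable && s[i].queue != QUEUE_NOT_ASSIGNED)
			continue;	/* pinned by an ethtool rule */
		do {
			vid++;
		} while (vid < nvids && !active[vid]);
		if (vid >= nvids)
			s[i] = (struct vlan_slot){ 0, QUEUE_NOT_ASSIGNED, 0 };
		else
			s[i] = (struct vlan_slot){ 1, QUEUE_NOT_ASSIGNED, vid };
	}
}

int main(void)
{
	struct vlan_slot slots[MAX_FILTERS] = {
		[0] = { 1, 3, 100 },	/* pinned: vid 100 -> queue 3 */
	};
	uint8_t active[4096] = { [5] = 1, [7] = 1 };

	rebuild(slots, active, 4096);
	printf("slot1: en=%u vid=%u\n", slots[1].enable, slots[1].vlan_id);
	return 0;
}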
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7abdc0952425..279ea58f4a9e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -84,8 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
- cfg->vlan_id = 0U;
-
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 44ec47a3d60a..8e34c1e49bf2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
u32 mtu;
u32 flow_control;
u32 link_speed_msk;
- u32 vlan_id;
u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
@@ -61,6 +60,23 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+struct aq_hw_rx_fl2 {
+ struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
+};
+
+struct aq_hw_rx_fl3l4 {
+ u8 active_ipv4;
+ u8 active_ipv6:2;
+ u8 is_ipv6;
+};
+
+struct aq_hw_rx_fltrs_s {
+ struct hlist_head filter_list;
+ u16 active_filters;
+ struct aq_hw_rx_fl2 fl2;
+ struct aq_hw_rx_fl3l4 fl3l4;
+};
+
struct aq_nic_s {
atomic_t flags;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
@@ -81,10 +97,13 @@ struct aq_nic_s {
u32 count;
u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
+ /* Bitmask of currently assigned vlans from linux */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
+ struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
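
On the fl3l4 bookkeeping: an IPv4 rule occupies one of the eight L3L4 slots (one bit each in active_ipv4), while an IPv6 rule consumes four consecutive slots, leaving only two possible placements — hence the two-bit active_ipv6 field and the BIT(location / 4) indexing seen in aq_set_data_fl3l4(). A sketch of that accounting with local stand-ins for the struct:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

struct fl3l4_state {
	uint8_t active_ipv4;	/* 8 slots, one bit per slot    */
	uint8_t active_ipv6;	/* 2 placements: slots 0-3, 4-7 */
};

/* location here is already zero-based (ethtool location minus 32) */
static void mark(struct fl3l4_state *st, unsigned int location, int is_ipv6)
{
	if (is_ipv6)
		st->active_ipv6 |= BIT(location / 4);
	else
		st->active_ipv4 |= BIT(location);
}

int main(void)
{
	struct fl3l4_state st = { 0 };

	mark(&st, 4, 1);	/* ipv6 filter at ethtool location 36 */
	assert(st.active_ipv6 == BIT(1));
	mark(&st, 2, 0);	/* ipv4 filter at ethtool location 34 */
	assert(st.active_ipv4 == BIT(2));
	return 0;
}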
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 1d5d6b8df855..c8b44cdb91c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -19,6 +19,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "aq_filters.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
struct aq_nic_s *self = pci_get_drvdata(pdev);
if (self->ndev) {
+ aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
aq_nic_free_vectors(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index f02592f43fe3..6af7d7f0cdca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -41,7 +41,9 @@
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
- NETIF_F_LRO, \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
- if (cfg->vlan_id) {
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
- hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
- hw_atl_rpf_vlan_untagged_act_set(self, 1U);
-
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
- hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
- } else {
- hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- }
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
/* Rx Interrupts */
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
@@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_cmd_clear(self, location);
+ hw_atl_rpf_l4_spd_set(self, 0U, location);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location);
+ hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+ } else {
+ int i;
+
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ hw_atl_rpfl3l4_cmd_clear(self, location + i);
+ hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+ }
+ hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ hw_atl_b0_hw_fl3l4_clear(self, data);
+
+ if (data->cmd) {
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+ location,
+ data->ip_dst[0]);
+ hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+ location,
+ data->ip_src[0]);
+ } else {
+ hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+ location,
+ data->ip_dst);
+ hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+ location,
+ data->ip_src);
+ }
+ }
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self,
+ !!data->user_priority_en,
+ data->location);
+ if (data->user_priority_en)
+ hw_atl_rpf_etht_user_priority_set(self,
+ data->user_priority,
+ data->location);
+
+ if (data->queue < 0) {
+ hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
+ } else {
+ hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
+ hw_atl_rpf_etht_flr_set(self, 0U, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
+
+ return aq_hw_err_from_flags(self);
+}
+
+/**
+ * @brief Set VLAN filter table
+ * @details Configure VLAN filter table to accept (and assign the queue) traffic
+ * for the particular vlan ids.
+ * Note: use this function under vlan promisc mode so as not to lose traffic
+ *
+ * @param aq_hw_s
+ * @param aq_rx_filter_vlan VLAN filter configuration
+ * @return 0 - OK, <0 - error
+ */
+static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id,
+ i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+ /* set promisc mode when disabling the vlan filter */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
+ .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
+ .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
+ .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
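
Note how the IPv6 branches above walk HW_ATL_RX_CNT_REG_ADDR_IPV6 (4) consecutive slots: a 128-bit address is spread over four 32-bit L3 registers, so an IPv6 filter at ethtool location 36 occupies register slots 4 through 7. A tiny sketch of the slot math, constants restated from the headers in this patch:

#include <stdio.h>

#define AQ_RX_FIRST_LOC_FL3L4		32U
#define HW_ATL_RX_CNT_REG_ADDR_IPV6	4U

int main(void)
{
	unsigned int ethtool_loc = 36;	/* second (and last) ipv6 placement */
	unsigned int reg = ethtool_loc - AQ_RX_FIRST_LOC_FL3L4;

	for (unsigned int i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; i++)
		printf("ipv6 word %u -> l3l4 register slot %u\n", i, reg + i);
	return 0;
}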
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 5502ec5f0f69..939f77e2e117 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
vlan_id_flr);
}
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+ HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
+ vlan_rxq_en);
+}
+
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_F_MSK,
+ HW_ATL_RPF_VL_RXQ_F_SHIFT,
+ vlan_rxq);
+};
+
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter)
{
@@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
+ HW_ATL_RPF_L4_SPD_MSK,
+ HW_ATL_RPF_L4_SPD_SHIFT, val);
+}
+
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
+ HW_ATL_RPF_L4_DPD_MSK,
+ HW_ATL_RPF_L4_DPD_SHIFT, val);
+}
+
/* RPO: rx packet offload */
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
@@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
up_force_intr);
}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
+ ipv4_dest);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location),
+ ipv4_src);
+}
+
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ ipv6_src[i]);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ ipv6_dest[i]);
+}
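
All of the bitfield setters above go through aq_hw_write_reg_bit() with an (address, mask, shift) triple from hw_atl_llh_internal.h. A plausible model of that read-modify-write helper — illustrative only; the real helper (built as aq_hw_utils.o per the Makefile above) writes through PCI mmio:

#include <assert.h>
#include <stdint.h>

/* model: regs[] stands in for the mmio window */
static uint32_t regs[0x6000 / 4];

static void write_reg_bit(uint32_t addr, uint32_t msk,
			  uint32_t shift, uint32_t val)
{
	uint32_t old = regs[addr / 4];

	regs[addr / 4] = (old & ~msk) | ((val << shift) & msk);
}

int main(void)
{
	/* HW_ATL_RPF_VL_RXQ_F_* for filter 0: queue bits 24..20 @ 0x5290 */
	write_reg_bit(0x5290, 0x01F00000, 20, 3);
	assert((regs[0x5290 / 4] >> 20 & 0x1F) == 3);
	return 0;
}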
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 41f239928c15..03c570d115fe 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter);
+/* Set VLAN RX queue assignment enable */
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter);
+
+/* Set VLAN RX queue */
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter);
+
/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter);
@@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+/* set L4 source port */
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* set L4 destination port */
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
/* rpo */
/* set ipv4 header checksum offload enable */
@@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+/* clear ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* set ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest);
+
+/* set ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src);
+
+/* set command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
+
+/* set ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src);
+
+/* set ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest);
+
#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index a715fa317b1c..8470d92db812 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1092,24 +1092,43 @@
/* Default value of bitfield vl_id{F}[B:0] */
#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
-/* RX et_en{F} Bitfield Definitions
- * Preprocessor definitions for the bitfield "et_en{F}".
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq_en{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
- * PORT="pif_rpf_et_en_i[0]"
- */
-
-/* Register address for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
-/* Bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
-/* Inverted bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
-/* Lower bit position of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_SHIFT 31
-/* Width of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_WIDTH 1
-/* Default value of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
+ * PORT="pif_rpf_vl_rxq_en_i"
+ */
+
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1263,6 +1282,44 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l4_sp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_sp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
+/* Bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_SHIFT 0
+/* Width of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_WIDTH 16
+/* Default value of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
+
+/* RX l4_dp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
+ * Parameter: destport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_dp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
+/* Bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_SHIFT 0
+/* Width of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_WIDTH 16
+/* Default value of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
+
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_rpo_ipv4_chk_en_i"
@@ -2418,4 +2475,48 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
+#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
+#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0x00000000u
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053D0 + (location) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0x00000000u
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
#endif /* HW_ATL_LLH_INTERNAL_H */
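
The strided-register convention makes the new per-filter addresses easy to check by hand: each family is a base address plus a 4-byte stride. A quick sanity sketch over the defines, values duplicated locally with the HW_ATL_ prefix dropped:

#include <assert.h>

#define RPF_L4_SPD_ADR(p)	(0x00005400u + (p) * 0x4)
#define RPF_L4_DPD_ADR(p)	(0x00005420u + (p) * 0x4)
#define RPF_L3_SRCA_ADR(l)	(0x000053B0u + (l) * 0x4)
#define RPF_L3_DSTA_ADR(l)	(0x000053D0u + (l) * 0x4)
#define RPF_L3_REG_CTRL_ADR(l)	(0x00005380u + (l) * 0x4)

int main(void)
{
	assert(RPF_L4_SPD_ADR(5) == 0x5414);
	assert(RPF_L4_DPD_ADR(3) == 0x542C);
	assert(RPF_L3_SRCA_ADR(2) == 0x53B8);
	/* src/dst windows are 8 slots (32 bytes) apart, so they never alias */
	assert(RPF_L3_DSTA_ADR(0) - RPF_L3_SRCA_ADR(0) == 8 * 0x4);
	return 0;
}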
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 3613fca64b58..48278e333462 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -240,6 +240,64 @@ struct __packed offload_info {
u8 buf[0];
};
+enum hw_atl_rx_action_with_traffic {
+ HW_ATL_RX_DISCARD,
+ HW_ATL_RX_HOST,
+};
+
+struct aq_rx_filter_vlan {
+ u8 enable;
+ u8 location;
+ u16 vlan_id;
+ u8 queue;
+};
+
+struct aq_rx_filter_l2 {
+ s8 queue;
+ u8 location;
+ u8 user_priority_en;
+ u8 user_priority;
+ u16 ethertype;
+};
+
+struct aq_rx_filter_l3l4 {
+ u32 cmd;
+ u8 location;
+ u32 ip_dst[4];
+ u32 ip_src[4];
+ u16 p_dst;
+ u16 p_src;
+ u8 is_ipv6;
+};
+
+enum hw_atl_rx_protocol_value_l3l4 {
+ HW_ATL_RX_TCP,
+ HW_ATL_RX_UDP,
+ HW_ATL_RX_SCTP,
+ HW_ATL_RX_ICMP
+};
+
+enum hw_atl_rx_ctrl_registers_l3l4 {
+ HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23),
+ HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24),
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25),
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26),
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27),
+ HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28),
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29),
+ HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30),
+ HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31)
+};
+
+#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U
+#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U
+
+#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U
+
+#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
+ ((location) - AQ_RX_FIRST_LOC_FL3L4)
+
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
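
Putting the L3L4 control word together: aq_set_data_fl3l4() ORs these enable bits with the action and queue fields at their shifts; note that TCP is protocol value 0 in the enum, so a TCP rule needs only the compare-enable bit. A sketch of the resulting command word for a TCP/IPv4 dst-port filter steered to queue 2, flag values restated from the enum above:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

#define ENABLE_QUEUE_L3L4	BIT(23)
#define ENABLE_CMP_PROT_L4	BIT(25)
#define ENABLE_CMP_DEST_PORT_L4	BIT(26)
#define ENABLE_FLTR_L3L4	BIT(31)
#define RX_HOST			1U	/* hw_atl_rx_action_with_traffic */
#define QUEUE_SHIFT		8U	/* HW_ATL_RX_QUEUE_FL3L4_SHIFT   */
#define ACTION_SHIFT		16U	/* HW_ATL_RX_ACTION_FL3F4_SHIFT  */

int main(void)
{
	uint32_t cmd = ENABLE_FLTR_L3L4 | ENABLE_CMP_PROT_L4 |
		       ENABLE_CMP_DEST_PORT_L4 |
		       (RX_HOST << ACTION_SHIFT) |
		       (2U << QUEUE_SHIFT) | ENABLE_QUEUE_L3L4;

	assert(cmd == 0x86810200);
	return 0;
}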
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index f69d39f17bdd..1746172ffbf7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -52,6 +52,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
+#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+#define hnae3_dev_gro_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -301,6 +305,8 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
+ * set_gro_en()
+ * Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -445,6 +451,7 @@ struct hnae3_ae_ops {
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, int enable);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8d07ec668d5d..22220af92aa9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,6 +15,7 @@
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
+#include <net/tcp.h>
#include <net/vxlan.h>
#include "hnae3.h"
@@ -1345,6 +1346,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if ((changed & NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ if (features & NETIF_F_GRO_HW)
+ ret = h->ae_algo->ops->set_gro_en(h, true);
+ else
+ ret = h->ae_algo->ops->set_gro_en(h, false);
+ if (ret)
+ return ret;
+ }
+
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1714,8 +1724,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
- if (pdev->revision >= 0x21)
+ if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
+ }
}
/* hns3_probe - Device initialization routine
@@ -1927,7 +1939,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GRO_HW;
+ netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2305,6 +2319,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
+ /* We MUST enable hardware checksum before enabling hardware GRO */
+ if (skb_shinfo(skb)->gso_size) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
/* check if hardware has done checksum */
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
@@ -2348,6 +2368,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
@@ -2381,6 +2404,153 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+ unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG 1
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ netdev_err(netdev, "alloc rx skb fail\n");
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* This page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+ return 0;
+ }
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.seg_pkt_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ return HNS3_NEED_ADD_FRAG;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+ struct sk_buff **out_skb, bool pending)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *pre_desc;
+ u32 bd_base_info;
+ int pre_bd;
+
+ /* If there is a pending BD, next_to_clean has already advanced
+ * past it, so the FE bit must be read from the previous
+ * descriptor.
+ */
+ if (pending) {
+ pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+ ring->desc_num;
+ pre_desc = &ring->desc[pre_bd];
+ bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+ } else {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ }
+
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ return -ENXIO;
+
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+ HNS3_RX_HEAD_SIZE);
+ if (unlikely(!new_skb)) {
+ netdev_err(ring->tqp->handle->kinfo.netdev,
+ "alloc rx skb frag fail\n");
+ return -ENXIO;
+ }
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ ring->pending_buf++;
+ }
+
+ return 0;
+}
+
+static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info)
+{
+ u16 gro_count;
+ u32 l3_type;
+
+ gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!gro_count)
+ return;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = gro_count;
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ if (skb_shinfo(skb)->gso_size)
+ tcp_gro_complete(skb);
+}
+
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
@@ -2397,18 +2567,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb, int *out_bnum)
+ struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
- struct sk_buff *skb;
- unsigned char *va;
u32 bd_base_info;
- int pull_len;
u32 l234info;
int length;
- int bnum;
+ int ret;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2420,9 +2588,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
/* Check valid BD */
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
- return -EFAULT;
+ return -ENXIO;
- va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+ if (!skb)
+ ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page
* Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2431,62 +2600,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(va);
+ prefetch(ring->va);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(ring->va + L1_CACHE_BYTES);
#endif
- skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
- if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- return -ENOMEM;
- }
-
- prefetchw(skb->data);
-
- bnum = 1;
- if (length <= HNS3_RX_HEAD_SIZE) {
- memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+ if (!skb) {
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ *out_skb = skb = ring->skb;
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
- desc_cb->reuse_flag = 1;
- else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ if (ret < 0) /* buffer allocation failed */
+ return ret;
+ if (ret > 0) { /* need to add fragments */
+ ret = hns3_add_frag(ring, desc, &skb, false);
+ if (ret)
+ return ret;
- ring_ptr_move_fw(ring, next_to_clean);
+ /* The head data may be changed when GRO is enabled, so copy
+ * it in only after the rest of the packet has been received.
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
} else {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
- memcpy(__skb_put(skb, pull_len), va,
- ALIGN(pull_len, sizeof(long)));
-
- hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ ret = hns3_add_frag(ring, desc, &skb, true);
+ if (ret)
+ return ret;
- while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
- desc = &ring->desc[ring->next_to_clean];
- desc_cb = &ring->desc_cb[ring->next_to_clean];
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
- bnum++;
- }
+ /* The head data may be changed when GRO is enabled, so copy
+ * it in only after the rest of the packet has been received.
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
}
- *out_bnum = bnum;
-
l234info = le32_to_cpu(desc->rx.l234_info);
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2536,7 +2685,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
+ /* This is needed in order to enable forwarding support */
+ hns3_set_gro_param(skb, l234info, bd_base_info);
+
hns3_rx_checksum(ring, skb, desc);
+ *out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
@@ -2549,9 +2702,9 @@ int hns3_clean_rx_ring(
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = NULL;
- int num, bnum = 0;
+ int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ struct sk_buff *skb = ring->skb;
+ int num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2565,24 +2718,32 @@ int hns3_clean_rx_ring(
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
clean_count = 0;
- unused_count = hns3_desc_unused(ring);
+ unused_count = hns3_desc_unused(ring) -
+ ring->pending_buf;
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb, &bnum);
+ err = hns3_handle_rx_bd(ring, &skb);
if (unlikely(!skb)) /* This fault cannot be repaired */
goto out;
- recv_bds += bnum;
- clean_count += bnum;
- if (unlikely(err)) { /* Do jump the err */
- recv_pkts++;
+ if (err == -ENXIO) { /* FE bit not yet seen for this packet */
+ goto out;
+ } else if (unlikely(err)) { /* skip this erroneous packet */
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
recv_pkts++;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 10ff18af3cc7..3365c9596983 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -109,6 +109,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +139,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
-#define HNS3_RXD_HDL_S 16
-#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
-#define HNS3_RXD_HSIND_B 31
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -401,11 +404,19 @@ struct hns3_enet_ring {
*/
int next_to_clean;
+ int pull_len; /* head length for current packet */
+ u32 frag_num;
+ unsigned char *va; /* first buffer address for current packet */
+
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
+
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
};
struct hns_queue;
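The repurposed descriptor fields decode with the usual mask-and-shift pattern (hnae3_get_field() in the driver). A sketch of the extraction hns3_set_gro_param() performs, using the field definitions above; the helper itself is illustrative only:

/* Illustration: pull hardware GRO metadata out of the descriptor words. */
static void example_decode_gro(u32 l234info, u32 bd_base_info,
			       u16 *gro_count, u16 *gso_size)
{
	*gro_count = (l234info & HNS3_RXD_GRO_COUNT_M) >>
		     HNS3_RXD_GRO_COUNT_S;
	*gso_size = (bd_base_info & HNS3_RXD_GRO_SIZE_M) >>
		    HNS3_RXD_GRO_SIZE_S;
}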
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 872cd4bdd70d..aef044d08b11 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -152,6 +152,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
@@ -758,6 +759,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 43bfc730a62d..eb6b6a5f744f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -921,6 +921,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
@@ -7090,6 +7112,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ goto err_mdiobus_unreg;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -7221,6 +7247,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -7637,6 +7667,14 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_config_gro(hdev, enable);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -7708,6 +7746,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 090541de0c7d..47030b42341f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* GRO command */
+ HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 6b4d1477055f..e27d6fa85a74 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1655,6 +1655,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -2122,6 +2145,10 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
ret = hclgevf_init_vlan_config(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2199,6 +2226,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2337,6 +2368,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
+static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_config_gro(hdev, enable);
+}
+
static void hclgevf_get_media_type(struct hnae3_handle *handle,
u8 *media_type)
{
@@ -2411,6 +2449,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
.ae_dev_resetting = hclgevf_ae_dev_resetting,
.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
};
static struct hnae3_ae_algo ae_algovf = {
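PF and VF use the same opcode (0x0C10) and an identical payload, which must exactly fill the 24-byte inline data area of a command descriptor (__le16 gro_en plus 22 reserved bytes). A hypothetical compile-time check of that layout, assuming both headers were visible to one translation unit; it is not part of the patch:

static void example_gro_cmd_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct hclge_cfg_gro_status_cmd) != 24);
	BUILD_BUG_ON(sizeof(struct hclgevf_cfg_gro_status_cmd) != 24);
}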
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 3c661f422688..1629b07f727b 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -50,56 +50,37 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
return 0;
}
-static int
-nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, bool is_u64,
- u64 *res)
-{
- u64 val, sum = 0;
- unsigned int i;
- int err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
- is_u64, &val);
- if (err)
- return err;
- sum += val;
- }
-
- *res = sum;
- return 0;
-}
-
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
- struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_cpp *cpp = abm->app->cpp;
u64 sym_offset;
int err;
- sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
- err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
- sym_offset, val);
+ __clear_bit(id, abm->threshold_undef);
+ if (abm->thresholds[id] == val)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
if (err) {
- nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp,
+ "RED offload setting level failed on subqueue %d\n",
+ id);
return err;
}
+ abm->thresholds[id] = val;
return 0;
}
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int queue,
+ u32 val)
{
- int i, err;
+ unsigned int threshold;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
- if (err)
- return err;
- }
+ threshold = alink->queue_base + queue;
- return 0;
+ return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
}
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
@@ -153,42 +134,6 @@ int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
i, true, &stats->overlimits);
}
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats)
-{
- u64 pkts = 0, bytes = 0;
- int i, err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
- }
- stats->tx_pkts = pkts;
- stats->tx_bytes = bytes;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- false, &stats->backlog_bytes);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- false, &stats->backlog_pkts);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &stats->drops);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &stats->overlimits);
-}
-
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
struct nfp_alink_xstats *xstats)
{
@@ -205,22 +150,6 @@ int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
i, true, &xstats->ecn_marked);
}
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats)
-{
- int err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &xstats->pdrop);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &xstats->ecn_marked);
-}
-
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
{
return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE,
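__nfp_abm_ctrl_set_q_lvl() now caches thresholds host-side, so repeated offload refreshes skip rtsym writes for unchanged levels. A trivial sketch of that guard, with names as in the patch; the helper is illustrative:

/* Illustration: only touch the FW symbol when the value changed. */
static bool example_thr_needs_write(const struct nfp_abm *abm,
				    unsigned int id, u32 val)
{
	return abm->thresholds[id] != val;
}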
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 3d15de0ae271..a5732d3bd1b7 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -2,10 +2,12 @@
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "../nfpcore/nfp.h"
@@ -36,6 +38,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
switch (type) {
+ case TC_SETUP_ROOT_QDISC:
+ return nfp_abm_setup_root(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_MQ:
return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_RED:
@@ -307,31 +311,23 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
alink->abm = abm;
alink->vnic = nn;
alink->id = id;
- alink->parent = TC_H_ROOT;
alink->total_queues = alink->vnic->max_rx_rings;
- alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
- GFP_KERNEL);
- if (!alink->qdiscs) {
- err = -ENOMEM;
- goto err_free_alink;
- }
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
*/
err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
if (err < 0)
- goto err_free_qdiscs;
+ goto err_free_alink;
netif_keep_dst(nn->dp.netdev);
nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
nfp_abm_ctrl_read_params(alink);
+ INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
return 0;
-err_free_qdiscs:
- kvfree(alink->qdiscs);
err_free_alink:
kfree(alink);
return err;
@@ -342,7 +338,7 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
struct nfp_abm_link *alink = nn->app_priv;
nfp_abm_kill_reprs(alink->abm, alink);
- kvfree(alink->qdiscs);
+ WARN(!radix_tree_empty(&alink->qdiscs), "left over qdiscs\n");
kfree(alink);
}
@@ -398,6 +394,7 @@ static int nfp_abm_init(struct nfp_app *app)
struct nfp_pf *pf = app->pf;
struct nfp_reprs *reprs;
struct nfp_abm *abm;
+ unsigned int i;
int err;
if (!pf->eth_tbl) {
@@ -424,15 +421,28 @@ static int nfp_abm_init(struct nfp_app *app)
if (err)
goto err_free_abm;
+ err = -ENOMEM;
+ abm->num_thresholds = NFP_NET_MAX_RX_RINGS;
+ abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
+ if (!abm->threshold_undef)
+ goto err_free_abm;
+
+ abm->thresholds = kvcalloc(abm->num_thresholds,
+ sizeof(*abm->thresholds), GFP_KERNEL);
+ if (!abm->thresholds)
+ goto err_free_thresh_umap;
+ for (i = 0; i < NFP_NET_MAX_RX_RINGS; i++)
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
/* We start in legacy mode, make sure advanced queuing is disabled */
err = nfp_abm_ctrl_qm_disable(abm);
if (err)
- goto err_free_abm;
+ goto err_free_thresh;
err = -ENOMEM;
reprs = nfp_reprs_alloc(pf->max_data_vnics);
if (!reprs)
- goto err_free_abm;
+ goto err_free_thresh;
RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
reprs = nfp_reprs_alloc(pf->max_data_vnics);
@@ -444,6 +454,10 @@ static int nfp_abm_init(struct nfp_app *app)
err_free_phys:
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_thresh:
+ kvfree(abm->thresholds);
+err_free_thresh_umap:
+ bitmap_free(abm->threshold_undef);
err_free_abm:
kfree(abm);
app->priv = NULL;
@@ -457,6 +471,8 @@ static void nfp_abm_clean(struct nfp_app *app)
nfp_abm_eswitch_clean_up(abm);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+ bitmap_free(abm->threshold_undef);
+ kvfree(abm->thresholds);
kfree(abm);
app->priv = NULL;
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 3774c063e419..240e2c8683fe 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -5,9 +5,15 @@
#define __NFP_ABM_H__ 1
#include <linux/bits.h>
+#include <linux/radix-tree.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>
+/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
+ * 2.5ms / 400Hz seems more than sufficient for stats resolution.
+ */
+#define NFP_ABM_STATS_REFRESH_IVAL (2500 * 1000) /* ns */
+
#define NFP_ABM_LVL_INFINITY S32_MAX
struct nfp_app;
@@ -20,6 +26,11 @@ struct nfp_net;
* struct nfp_abm - ABM NIC app structure
* @app: back pointer to nfp_app
* @pf_id: ID of our PF link
+ *
+ * @thresholds: current threshold configuration
+ * @threshold_undef: bitmap of thresholds which have not been set
+ * @num_thresholds: number of @thresholds and bits in @threshold_undef
+ *
* @eswitch_mode: devlink eswitch mode, advanced functions only visible
* in switchdev mode
* @q_lvls: queue level control area
@@ -28,6 +39,11 @@ struct nfp_net;
struct nfp_abm {
struct nfp_app *app;
unsigned int pf_id;
+
+ u32 *thresholds;
+ unsigned long *threshold_undef;
+ size_t num_thresholds;
+
enum devlink_eswitch_mode eswitch_mode;
const struct nfp_rtsym *q_lvls;
const struct nfp_rtsym *qm_stats;
@@ -61,16 +77,67 @@ struct nfp_alink_xstats {
u64 pdrop;
};
+enum nfp_qdisc_type {
+ NFP_QDISC_NONE = 0,
+ NFP_QDISC_MQ,
+ NFP_QDISC_RED,
+};
+
+#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL)
+
/**
- * struct nfp_red_qdisc - representation of single RED Qdisc
- * @handle: handle of currently offloaded RED Qdisc
- * @stats: statistics from last refresh
- * @xstats: base of extended statistics
+ * struct nfp_qdisc - tracked TC Qdisc
+ * @netdev: netdev on which Qdisc was created
+ * @type: Qdisc type
+ * @handle: handle of this Qdisc
+ * @parent_handle: handle of the parent (unreliable if Qdisc was grafted)
+ * @use_cnt: number of attachment points in the hierarchy
+ * @num_children: current size of the @children array
+ * @children: pointers to children
+ *
+ * @params_ok: parameters of this Qdisc are OK for offload
+ * @offload_mark: offload refresh state - selected for offload
+ * @offloaded: Qdisc is currently offloaded to the HW
+ *
+ * @mq: MQ Qdisc specific parameters and state
+ * @mq.stats: current stats of the MQ Qdisc
+ * @mq.prev_stats: previously reported @mq.stats
+ *
+ * @red: RED Qdisc specific parameters and state
+ * @red.threshold: ECN marking threshold
+ * @red.stats: current stats of the RED Qdisc
+ * @red.prev_stats: previously reported @red.stats
+ * @red.xstats: extended stats for RED - current
+ * @red.prev_xstats: extended stats for RED - previously reported
*/
-struct nfp_red_qdisc {
+struct nfp_qdisc {
+ struct net_device *netdev;
+ enum nfp_qdisc_type type;
u32 handle;
- struct nfp_alink_stats stats;
- struct nfp_alink_xstats xstats;
+ u32 parent_handle;
+ unsigned int use_cnt;
+ unsigned int num_children;
+ struct nfp_qdisc **children;
+
+ bool params_ok;
+ bool offload_mark;
+ bool offloaded;
+
+ union {
+ /* NFP_QDISC_MQ */
+ struct {
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ } mq;
+ /* TC_SETUP_QDISC_RED */
+ struct {
+ u32 threshold;
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ struct nfp_alink_xstats xstats;
+ struct nfp_alink_xstats prev_xstats;
+ } red;
+ };
};
/**
@@ -80,9 +147,11 @@ struct nfp_red_qdisc {
* @id: id of the data vNIC
* @queue_base: id of base to host queue within PCIe (not QC idx)
* @total_queues: number of PF queues
- * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
- * @num_qdiscs: number of currently used qdiscs
- * @qdiscs: array of qdiscs
+ *
+ * @last_stats_update: ktime of last stats update
+ *
+ * @root_qdisc: pointer to the current root of the Qdisc hierarchy
+ * @qdiscs: all qdiscs recorded by major part of the handle
*/
struct nfp_abm_link {
struct nfp_abm *abm;
@@ -90,11 +159,16 @@ struct nfp_abm_link {
unsigned int id;
unsigned int queue_base;
unsigned int total_queues;
- u32 parent;
- unsigned int num_qdiscs;
- struct nfp_red_qdisc *qdiscs;
+
+ u64 last_stats_update;
+
+ struct nfp_qdisc *root_qdisc;
+ struct radix_tree_root qdiscs;
};
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt);
int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt);
int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
@@ -102,15 +176,11 @@ int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int queue,
u32 val);
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats);
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats);
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
struct nfp_alink_xstats *xstats);
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
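Qdisc statistics are handed to the TC core as deltas against the previously reported snapshot (the prev_stats/prev_xstats members above), and firmware reads are rate-limited by NFP_ABM_STATS_REFRESH_IVAL. A condensed sketch of the rate limiter, assuming the same ktime_get() timestamping qdisc.c uses:

/* Illustration only: is a firmware stats refresh due yet? */
static bool example_stats_refresh_due(const struct nfp_abm_link *alink)
{
	u64 now = ktime_get();

	return now - alink->last_stats_update >= NFP_ABM_STATS_REFRESH_IVAL;
}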
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
index bb05f9ee0401..16c4afe3a37f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -1,242 +1,548 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
+#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
#include "../nfp_port.h"
#include "main.h"
-static int
-__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs, u32 init_val)
+static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
- int ret;
+ return qdisc->type == NFP_QDISC_RED;
+}
- ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
- memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
+static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
+{
+ return qdisc->children[id] &&
+ qdisc->children[id] != NFP_QDISC_UNTRACKED;
+}
- alink->parent = handle;
- alink->num_qdiscs = qs;
- port->tc_offload_cnt = qs;
+static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
+{
+ return rtnl_dereference(*slot);
+}
- return ret;
+static void
+nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
+ struct nfp_alink_stats *child)
+{
+ parent->tx_pkts += child->tx_pkts;
+ parent->tx_bytes += child->tx_bytes;
+ parent->backlog_pkts += child->backlog_pkts;
+ parent->backlog_bytes += child->backlog_bytes;
+ parent->overlimits += child->overlimits;
+ parent->drops += child->drops;
}
static void
-nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs)
+nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
{
- __nfp_abm_reset_root(netdev, alink, handle, qs, NFP_ABM_LVL_INFINITY);
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ int err;
+
+ if (!qdisc->offloaded)
+ return;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, queue, &qdisc->red.stats);
+ if (err)
+ nfp_err(cpp, "RED stats (%d) read failed with error %d\n",
+ queue, err);
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, queue, &qdisc->red.xstats);
+ if (err)
+ nfp_err(cpp, "RED xstats (%d) read failed with error %d\n",
+ queue, err);
}
-static int
-nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+static void
+nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
- unsigned int i = TC_H_MIN(opt->parent) - 1;
+ unsigned int i;
- if (opt->parent == TC_H_ROOT)
- i = 0;
- else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
- i = TC_H_MIN(opt->parent) - 1;
- else
- return -EOPNOTSUPP;
+ if (qdisc->type != NFP_QDISC_MQ)
+ return;
- if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
- return -EOPNOTSUPP;
+ for (i = 0; i < alink->total_queues; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i))
+ nfp_abm_stats_update_red(alink, qdisc->children[i], i);
+}
- return i;
+static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
+{
+ alink->last_stats_update = time_now;
+ if (alink->root_qdisc)
+ nfp_abm_stats_update_mq(alink, alink->root_qdisc);
+}
+
+static void nfp_abm_stats_update(struct nfp_abm_link *alink)
+{
+ u64 now;
+
+ /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
+ * of all their leaves, so we would read the same stat multiple times
+ * for every dump.
+ */
+ now = ktime_get();
+ if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
+ return;
+
+ __nfp_abm_stats_update(alink, now);
}
static void
-nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle)
+nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
+ unsigned int start, unsigned int end)
{
unsigned int i;
- for (i = 0; i < alink->num_qdiscs; i++)
- if (handle == alink->qdiscs[i].handle)
- break;
- if (i == alink->num_qdiscs)
+ for (i = start; i < end; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i)) {
+ qdisc->children[i]->use_cnt--;
+ qdisc->children[i] = NULL;
+ }
+}
+
+static void
+nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ /* Don't complain when qdisc is getting unlinked */
+ if (qdisc->use_cnt)
+ nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
+ qdisc->handle);
+
+ if (!nfp_abm_qdisc_is_red(qdisc))
return;
- if (alink->parent == TC_H_ROOT) {
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- } else {
- nfp_abm_ctrl_set_q_lvl(alink, i, NFP_ABM_LVL_INFINITY);
- memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
+ qdisc->red.stats.backlog_pkts = 0;
+ qdisc->red.stats.backlog_bytes = 0;
+}
+
+static int
+__nfp_abm_stats_init(struct nfp_abm_link *alink,
+ unsigned int queue, struct nfp_alink_stats *prev_stats,
+ struct nfp_alink_xstats *prev_xstats)
+{
+ u64 backlog_pkts, backlog_bytes;
+ int err;
+
+ /* Don't touch the backlog, backlog can only be reset after it has
+ * been reported back to the tc qdisc stats.
+ */
+ backlog_pkts = prev_stats->backlog_pkts;
+ backlog_bytes = prev_stats->backlog_bytes;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, queue, prev_stats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED stats init (%d) failed with error %d\n",
+ queue, err);
+ return err;
}
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, queue, prev_xstats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED xstats init (%d) failed with error %d\n",
+ queue, err);
+ return err;
+ }
+
+ prev_stats->backlog_pkts = backlog_pkts;
+ prev_stats->backlog_bytes = backlog_bytes;
+ return 0;
}
-static bool
-nfp_abm_red_check_params(struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
+static int
+nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
{
- struct nfp_cpp *cpp = alink->abm->app->cpp;
+ return __nfp_abm_stats_init(alink, queue,
+ &qdisc->red.prev_stats,
+ &qdisc->red.prev_xstats);
+}
- if (!opt->set.is_ecn) {
- nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
- opt->parent, opt->handle);
- return false;
+static void
+nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_RED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1 &&
+ !qdisc->children[0];
+
+ /* If we are starting offload, init prev_stats */
+ if (qdisc->offload_mark && !qdisc->offloaded)
+ if (nfp_abm_stats_init(alink, qdisc, queue))
+ qdisc->offload_mark = false;
+
+ if (!qdisc->offload_mark)
+ return;
+
+ nfp_abm_ctrl_set_q_lvl(alink, queue, qdisc->red.threshold);
+}
+
+static void
+nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++) {
+ struct nfp_qdisc *child = qdisc->children[i];
+
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ nfp_abm_offload_compile_red(alink, child, i);
}
- if (opt->set.is_harddrop) {
- nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
- opt->parent, opt->handle);
- return false;
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct radix_tree_iter iter;
+ struct nfp_qdisc *qdisc;
+ void __rcu **slot;
+ size_t i;
+
+ /* Mark all thresholds as unconfigured */
+ __bitmap_set(abm->threshold_undef,
+ alink->queue_base, alink->total_queues);
+
+ /* Clear offload marks */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ qdisc->offload_mark = false;
}
- if (opt->set.min != opt->set.max) {
- nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
- opt->parent, opt->handle);
- return false;
+
+ if (alink->root_qdisc)
+ nfp_abm_offload_compile_mq(alink, alink->root_qdisc);
+
+ /* Refresh offload status */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ if (!qdisc->offload_mark && qdisc->offloaded)
+ nfp_abm_qdisc_offload_stop(alink, qdisc);
+ qdisc->offloaded = qdisc->offload_mark;
}
- if (opt->set.min > NFP_ABM_LVL_INFINITY) {
- nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
- opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
- opt->handle);
- return false;
+
+ /* Reset the unconfigured thresholds */
+ for (i = 0; i < abm->num_thresholds; i++)
+ if (test_bit(i, abm->threshold_undef))
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+ __nfp_abm_stats_update(alink, ktime_get());
+}
+
+static void
+nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct radix_tree_iter iter;
+ unsigned int mq_refs = 0;
+ void __rcu **slot;
+
+ if (!qdisc->use_cnt)
+ return;
+ /* MQ doesn't send reliable destroy notifications, so its
+ * children need special handling here.
+ */
+ if (qdisc->type == NFP_QDISC_MQ &&
+ qdisc == alink->root_qdisc &&
+ netdev->reg_state == NETREG_UNREGISTERING)
+ return;
+
+ /* Count refs held by MQ instances and clear pointers */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
+ unsigned int i;
+
+ if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
+ continue;
+ for (i = 0; i < mq->num_children; i++)
+ if (mq->children[i] == qdisc) {
+ mq->children[i] = NULL;
+ mq_refs++;
+ }
}
- return true;
+ WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
+ qdisc->use_cnt, mq_refs);
+}
+
+static void
+nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+
+ if (!qdisc)
+ return;
+ nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
+ WARN_ON(radix_tree_delete(&alink->qdiscs,
+ TC_H_MAJ(qdisc->handle)) != qdisc);
+
+ kfree(qdisc->children);
+ kfree(qdisc);
+
+ port->tc_offload_cnt--;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_qdisc *qdisc;
+ int err;
+
+ qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
+ if (!qdisc)
+ return NULL;
+
+ qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
+ if (!qdisc->children)
+ goto err_free_qdisc;
+
+ qdisc->netdev = netdev;
+ qdisc->type = type;
+ qdisc->parent_handle = parent_handle;
+ qdisc->handle = handle;
+ qdisc->num_children = children;
+
+ err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "Qdisc insertion into radix tree failed: %d\n", err);
+ goto err_free_child_tbl;
+ }
+
+ port->tc_offload_cnt++;
+ return qdisc;
+
+err_free_child_tbl:
+ kfree(qdisc->children);
+err_free_qdisc:
+ kfree(qdisc);
+ return NULL;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
+{
+ return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
}
static int
-nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
+nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children, struct nfp_qdisc **qdisc)
+{
+ *qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (*qdisc) {
+ if (WARN_ON((*qdisc)->type != type))
+ return -EINVAL;
+ return 1;
+ }
+
+ *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
+ children);
+ return *qdisc ? 0 : -ENOMEM;
+}
+
+static void
+nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle)
{
- bool existing;
- int i, err;
+ struct nfp_qdisc *qdisc;
- i = nfp_abm_red_find(alink, opt);
- existing = i >= 0;
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return;
+
+ /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
+ if (alink->root_qdisc == qdisc)
+ qdisc->use_cnt--;
- if (!nfp_abm_red_check_params(alink, opt)) {
- err = -EINVAL;
- goto err_destroy;
+ nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
+ nfp_abm_qdisc_free(netdev, alink, qdisc);
+
+ if (alink->root_qdisc == qdisc) {
+ alink->root_qdisc = NULL;
+ /* Only root change matters, other changes are acted upon on
+ * the graft notification.
+ */
+ nfp_abm_qdisc_offload_update(alink);
}
+}
- if (existing) {
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
- else
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- if (err)
- goto err_destroy;
+static int
+nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
+ unsigned int id)
+{
+ struct nfp_qdisc *parent, *child;
+
+ parent = nfp_abm_qdisc_find(alink, handle);
+ if (!parent)
return 0;
- }
- if (opt->parent == TC_H_ROOT) {
- i = 0;
- err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
- opt->set.min);
- } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
- i = TC_H_MIN(opt->parent) - 1;
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- } else {
+ if (WARN(id >= parent->num_children,
+ "graft child out of bound %d >= %d\n",
+ id, parent->num_children))
return -EINVAL;
- }
- /* Set the handle to try full clean up, in case IO failed */
- alink->qdiscs[i].handle = opt->handle;
- if (err)
- goto err_destroy;
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i,
- &alink->qdiscs[i].stats);
- if (err)
- goto err_destroy;
+ nfp_abm_qdisc_unlink_children(parent, id, id + 1);
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink,
- &alink->qdiscs[i].xstats);
+ child = nfp_abm_qdisc_find(alink, child_handle);
+ if (child)
+ child->use_cnt++;
else
- err = nfp_abm_ctrl_read_q_xstats(alink, i,
- &alink->qdiscs[i].xstats);
- if (err)
- goto err_destroy;
+ child = NFP_QDISC_UNTRACKED;
+ parent->children[id] = child;
- alink->qdiscs[i].stats.backlog_pkts = 0;
- alink->qdiscs[i].stats.backlog_bytes = 0;
+ nfp_abm_qdisc_offload_update(alink);
return 0;
-err_destroy:
- /* If the qdisc keeps on living, but we can't offload undo changes */
- if (existing) {
- opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
- opt->set.qstats->backlog -=
- alink->qdiscs[i].stats.backlog_bytes;
- }
- nfp_abm_red_destroy(netdev, alink, opt->handle);
-
- return err;
}
static void
-nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
- struct tc_qopt_offload_stats *stats)
+nfp_abm_stats_calculate(struct nfp_alink_stats *new,
+ struct nfp_alink_stats *old,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_queue *qstats)
{
- _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
+ _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
new->tx_pkts - old->tx_pkts);
- stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
- stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
- stats->qstats->overlimits += new->overlimits - old->overlimits;
- stats->qstats->drops += new->drops - old->drops;
+ qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+ qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+ qstats->overlimits += new->overlimits - old->overlimits;
+ qstats->drops += new->drops - old->drops;
+}
+
+static void
+nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
+ struct nfp_alink_xstats *old,
+ struct red_stats *stats)
+{
+ stats->forced_mark += new->ecn_marked - old->ecn_marked;
+ stats->pdrop += new->pdrop - old->pdrop;
}
static int
-nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
- struct nfp_alink_stats *prev_stats;
- struct nfp_alink_stats stats;
- int i, err;
+ struct nfp_qdisc *qdisc;
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_stats = &alink->qdiscs[i].stats;
+ nfp_abm_stats_update(alink);
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
+ qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (!qdisc || !qdisc->offloaded)
+ return -EOPNOTSUPP;
- nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
+ nfp_abm_stats_red_calculate(&qdisc->red.xstats,
+ &qdisc->red.prev_xstats,
+ opt->xstats);
+ qdisc->red.prev_xstats = qdisc->red.xstats;
+ return 0;
+}
- *prev_stats = stats;
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
- return 0;
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+ /* If the qdisc offload has stopped we may still need to adjust
+ * the backlog counters back, so carry on even if the qdisc is
+ * not currently offloaded.
+ */
+
+ nfp_abm_stats_calculate(&qdisc->red.stats,
+ &qdisc->red.prev_stats,
+ stats->bstats, stats->qstats);
+ qdisc->red.prev_stats = qdisc->red.stats;
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_red_check_params(struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+
+ if (!opt->set.is_ecn) {
+ nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_harddrop) {
+ nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min != opt->set.max) {
+ nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min > NFP_ABM_LVL_INFINITY) {
+ nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
+ opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
+ opt->handle);
+ return false;
+ }
+
+ return true;
}
static int
-nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
{
- struct nfp_alink_xstats *prev_xstats;
- struct nfp_alink_xstats xstats;
- int i, err;
+ struct nfp_qdisc *qdisc;
+ int ret;
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_xstats = &alink->qdiscs[i].xstats;
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
+ opt->handle, 1, &qdisc);
+ if (ret < 0)
+ return ret;
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink, &xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
- if (err)
- return err;
+ /* If limit != 0, the child gets reset */
+ if (opt->set.limit) {
+ if (nfp_abm_qdisc_child_valid(qdisc, 0))
+ qdisc->children[0]->use_cnt--;
+ qdisc->children[0] = NULL;
+ } else {
+ /* A qdisc that was just allocated without a limit will use
+ * noop_qdisc, i.e. a black hole.
+ */
+ if (!ret)
+ qdisc->children[0] = NFP_QDISC_UNTRACKED;
+ }
- opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
- opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
+ qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
+ if (qdisc->params_ok)
+ qdisc->red.threshold = opt->set.min;
- *prev_xstats = xstats;
+ if (qdisc->use_cnt == 1)
+ nfp_abm_qdisc_offload_update(alink);
return 0;
}
@@ -248,37 +554,76 @@ int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
case TC_RED_REPLACE:
return nfp_abm_red_replace(netdev, alink, opt);
case TC_RED_DESTROY:
- nfp_abm_red_destroy(netdev, alink, opt->handle);
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
return 0;
case TC_RED_STATS:
- return nfp_abm_red_stats(alink, opt);
+ return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
case TC_RED_XSTATS:
return nfp_abm_red_xstats(alink, opt);
+ case TC_RED_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->child_handle, 0);
default:
return -EOPNOTSUPP;
}
}
static int
-nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
+nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
{
- struct nfp_alink_stats stats;
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
+ TC_H_ROOT, opt->handle, alink->total_queues,
+ &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = true;
+ qdisc->offloaded = true;
+ nfp_abm_qdisc_offload_update(alink);
+ return 0;
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc, *red;
unsigned int i;
- int err;
- for (i = 0; i < alink->num_qdiscs; i++) {
- if (alink->qdiscs[i].handle == TC_H_UNSPEC)
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_update(alink);
+
+ /* MQ stats are summed over the children in the core, so we need
+ * to add up the unreported child values.
+ */
+ memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
+ memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
+
+ for (i = 0; i < qdisc->num_children; i++) {
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
continue;
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
+ if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
+ continue;
+ red = qdisc->children[i];
- nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
- &opt->stats);
+ nfp_abm_stats_propagate(&qdisc->mq.stats,
+ &red->red.stats);
+ nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
+ &red->red.prev_stats);
}
- return 0;
+ nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
+ stats->bstats, stats->qstats);
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}
int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
@@ -286,16 +631,33 @@ int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
{
switch (opt->command) {
case TC_MQ_CREATE:
- nfp_abm_reset_root(netdev, alink, opt->handle,
- alink->total_queues);
- return 0;
+ return nfp_abm_mq_create(netdev, alink, opt);
case TC_MQ_DESTROY:
- if (opt->handle == alink->parent)
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
return 0;
case TC_MQ_STATS:
- return nfp_abm_mq_stats(alink, opt);
+ return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
+ case TC_MQ_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->graft_params.child_handle,
+ opt->graft_params.queue);
default:
return -EOPNOTSUPP;
}
}
+
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt)
+{
+ if (opt->ingress)
+ return -EOPNOTSUPP;
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt--;
+ alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt++;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
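
The use_cnt handling in nfp_abm_setup_root() reduces to a small reference-counting pattern; a minimal compilable sketch follows, with toy_* names standing in for the driver's types:

#include <stddef.h>

struct toy_qdisc {
    unsigned int use_cnt;
};

struct toy_link {
    struct toy_qdisc *root_qdisc;
};

/* Re-point the tracked root: drop the old reference, take a new one.
 * NULL models an untracked root (unknown handle). */
static void toy_set_root(struct toy_link *link, struct toy_qdisc *root)
{
    if (link->root_qdisc)
        link->root_qdisc->use_cnt--;
    link->root_qdisc = root;
    if (link->root_qdisc)
        link->root_qdisc->use_cnt++;
}

int main(void)
{
    struct toy_qdisc a = {1}, b = {1};
    struct toy_link link = {NULL};

    toy_set_root(&link, &a);   /* a.use_cnt == 2 */
    toy_set_root(&link, &b);   /* a.use_cnt == 1, b.use_cnt == 2 */
    toy_set_root(&link, NULL); /* b.use_cnt == 1 */
    return (a.use_cnt == 1 && b.use_cnt == 1) ? 0 : 1;
}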
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9434fd5a5477..e4aa030f1726 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -283,7 +283,7 @@ struct cpsw_ss_regs {
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
@@ -293,7 +293,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
- TS_LTYPE1_EN)
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
@@ -466,6 +466,8 @@ struct cpsw_priv {
bool mqprio_hw;
int fifo_bw[CPSW_TC_NUM];
int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -905,6 +907,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv;
if (cpsw->data.dual_emac) {
port = CPDMA_RX_SOURCE_PORT(status);
@@ -939,7 +942,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_put(skb, len);
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
- cpts_rx_timestamp(cpsw->cpts, skb);
+ priv = netdev_priv(ndev);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -2126,7 +2131,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
q_idx = skb_get_queue_mapping(skb);
@@ -2170,13 +2175,13 @@ fail:
#if IS_ENABLED(CONFIG_TI_CPTS)
-static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
+ struct cpsw_common *cpsw = priv->cpsw;
struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!cpts_is_tx_enabled(cpsw->cpts) &&
- !cpts_is_rx_enabled(cpsw->cpts)) {
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -2184,10 +2189,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ts_en |= CPSW_V1_TS_TX_EN;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -2207,20 +2212,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V2_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V3_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -2230,6 +2235,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2237,7 +2243,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
struct cpsw_common *cpsw = priv->cpsw;
- struct cpts *cpts = cpsw->cpts;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
@@ -2256,7 +2261,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ priv->rx_ts_enabled = 0;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -2264,7 +2269,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2276,18 +2281,18 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(cpsw);
+ cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
@@ -2303,7 +2308,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpts *cpts = cpsw->cpts;
+ struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
@@ -2312,10 +2317,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
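
The rx_filter normalization in cpsw_hwtstamp_set() follows the usual hwtstamp pattern: many requested filters are granted as one coarser filter, and the granted value is what gets stored and later returned by cpsw_hwtstamp_get(). A userspace-style sketch, assuming only the Linux net_tstamp uAPI header (toy_* names are invented; the driver additionally folds HWTSTAMP_FILTER_ALL/NTP_ALL into the V1 group, omitted here):

#include <linux/net_tstamp.h>

struct toy_port {
    int rx_ts_enabled; /* stores the granted HWTSTAMP_FILTER_* value */
};

/* Returns the granted filter, or -1 for unsupported (-ERANGE in the driver). */
static int toy_set_rx_filter(struct toy_port *p, int requested)
{
    switch (requested) {
    case HWTSTAMP_FILTER_NONE:
        p->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
        break;
    case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
    case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
    case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        p->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
        break;
    case HWTSTAMP_FILTER_PTP_V2_EVENT:
    case HWTSTAMP_FILTER_PTP_V2_SYNC:
    case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
    case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
    case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
    case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
    case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
    case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
    case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        p->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
        break;
    default:
        return -1;
    }
    return p->rx_ts_enabled;
}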
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index b96b93c686bf..054f78295d1d 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -86,6 +86,25 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static void cpts_purge_txq(struct cpts *cpts)
+{
+ struct cpts_skb_cb_data *skb_cb;
+ struct sk_buff *skb, *tmp;
+ int removed = 0;
+
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ if (time_after(jiffies, skb_cb->tmo)) {
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
+}
+
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
struct sk_buff *skb, *tmp;
@@ -119,9 +138,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev,
- "expiring tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
__skb_unlink(skb, &cpts->txq);
dev_consume_skb_any(skb);
}
@@ -294,8 +311,11 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
spin_lock_irqsave(&cpts->lock, flags);
ts = ns_to_timespec64(timecounter_read(&cpts->tc));
- if (!skb_queue_empty(&cpts->txq))
- delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ if (!skb_queue_empty(&cpts->txq)) {
+ cpts_purge_txq(cpts);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
pr_debug("cpts overflow check at %lld.%09ld\n",
@@ -410,8 +430,6 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
u64 ns;
struct skb_shared_hwtstamps *ssh;
- if (!cpts->rx_enable)
- return;
ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
if (!ns)
return;
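
cpts_purge_txq() above is the classic unlink-while-walking timeout sweep; the same idea in standalone C, with a plain singly linked list and time_t in place of skb queues and jiffies (all names invented):

#include <stdlib.h>
#include <time.h>

struct toy_entry {
    struct toy_entry *next;
    time_t deadline;
};

/* Walk the queue and free every entry whose deadline has passed;
 * returns the number removed (what cpts_purge_txq() reports via dev_dbg). */
static int toy_purge(struct toy_entry **head, time_t now)
{
    int removed = 0;

    while (*head) {
        if ((*head)->deadline < now) {
            struct toy_entry *dead = *head;

            *head = dead->next;
            free(dead);
            removed++;
        } else {
            head = &(*head)->next;
        }
    }
    return removed;
}

int main(void)
{
    struct toy_entry *q = NULL;

    return toy_purge(&q, time(NULL)); /* empty queue: removes nothing */
}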
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 73d73faf0f38..d2c7decd59b6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -136,26 +136,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
void cpts_release(struct cpts *cpts);
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
- cpts->rx_enable = enable;
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return !!cpts->rx_enable;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
- cpts->tx_enable = enable;
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return !!cpts->tx_enable;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
unsigned int class = ptp_classify_raw(skb);
@@ -197,24 +177,6 @@ static inline void cpts_unregister(struct cpts *cpts)
{
}
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
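
The paired definitions in cpts.h show the kernel's usual compile-out pattern: a real prototype when the option is enabled, a no-op static inline otherwise, so call sites need no #ifdefs. A kernel-style header sketch of the pattern, with an invented option and function:

#include <linux/kconfig.h>
#include <linux/types.h>

struct toy_ctx;

#if IS_ENABLED(CONFIG_TOY_FEATURE)
bool toy_can_timestamp(struct toy_ctx *ctx);
#else
/* Feature compiled out: callers still compile and always get false. */
static inline bool toy_can_timestamp(struct toy_ctx *ctx)
{
    return false;
}
#endif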
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0397ccb6597e..20d81e0b1c29 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -763,6 +763,8 @@ struct gbe_priv {
int cpts_registered;
struct cpts *cpts;
+ int rx_ts_enabled;
+ int tx_ts_enabled;
};
struct gbe_intf {
@@ -2564,7 +2566,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
- !cpts_is_tx_enabled(gbe_dev->cpts))
+ !gbe_dev->tx_ts_enabled)
return 0;
/* If phy has the txtstamp api, assume it will do it.
@@ -2598,7 +2600,9 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
return 0;
}
- cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+ if (gbe_dev->rx_ts_enabled)
+ cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+
p_info->rxtstamp_complete = true;
return 0;
@@ -2614,10 +2618,8 @@ static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = gbe_dev->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2628,8 +2630,8 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
u32 ts_en, seq_id, ctl;
- if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
- !cpts_is_tx_enabled(gbe_dev->cpts)) {
+ if (!gbe_dev->rx_ts_enabled &&
+ !gbe_dev->tx_ts_enabled) {
writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
return;
}
@@ -2641,10 +2643,10 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
(slave->ts_ctl.uni ? TS_UNI_EN :
slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
- if (cpts_is_tx_enabled(gbe_dev->cpts))
+ if (gbe_dev->tx_ts_enabled)
ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
- if (cpts_is_rx_enabled(gbe_dev->cpts))
+ if (gbe_dev->rx_ts_enabled)
ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
@@ -2670,10 +2672,10 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
- cpts_tx_enable(cpts, 0);
+ gbe_dev->tx_ts_enabled = 0;
break;
case HWTSTAMP_TX_ON:
- cpts_tx_enable(cpts, 1);
+ gbe_dev->tx_ts_enabled = 1;
break;
default:
return -ERANGE;
@@ -2681,12 +2683,12 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2698,7 +2700,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 21ce68964204..ad87bd3280d7 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -42,8 +42,8 @@ MODULE_LICENSE("GPL");
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
-#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
-#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
+#define IP101A_G_IRQ_PIN_USED BIT(15) /* INTR pin used */
+#define IP101A_G_NO_IRQ BIT(11) /* IRQ's inactive */
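
Both offload structs above use the same convention: the command enum tags which union member is meaningful. A minimal sketch mirroring tc_mq_qopt_offload with invented names (dispatch must switch on command before touching the union):

#include <stdint.h>

enum toy_mq_command { TOY_MQ_CREATE, TOY_MQ_DESTROY, TOY_MQ_STATS, TOY_MQ_GRAFT };

struct toy_graft_params {
    unsigned long queue;
    uint32_t child_handle;
};

struct toy_mq_offload {
    enum toy_mq_command command;
    uint32_t handle;
    union {
        uint64_t stats_cookie;         /* placeholder for the stats member */
        struct toy_graft_params graft; /* valid only for TOY_MQ_GRAFT */
    };
};

/* Only the member selected by command may be read. */
static int toy_setup_mq(const struct toy_mq_offload *opt)
{
    switch (opt->command) {
    case TOY_MQ_GRAFT:
        return opt->graft.child_handle ? 0 : -1;
    default:
        return -95; /* -EOPNOTSUPP */
    }
}

int main(void)
{
    struct toy_mq_offload opt = { .command = TOY_MQ_GRAFT, .handle = 1 };

    opt.graft.queue = 0;
    opt.graft.child_handle = 0x8001;
    return toy_setup_mq(&opt);
}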
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -170,11 +170,6 @@ static int ip101a_g_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- /* INTR pin used: speed/link/duplex will cause an interrupt */
- c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
- if (c < 0)
- return c;
-
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
c |= IP101A_G_APS_ON;
@@ -201,6 +196,19 @@ static int ip175c_config_aneg(struct phy_device *phydev)
return 0;
}
+static int ip101a_g_config_intr(struct phy_device *phydev)
+{
+ u16 val;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ /* INTR pin used: Speed/link/duplex will cause an interrupt */
+ val = IP101A_G_IRQ_PIN_USED;
+ else
+ val = IP101A_G_NO_IRQ;
+
+ return phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
+}
+
static int ip101a_g_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
@@ -234,6 +242,7 @@ static struct phy_driver icplus_driver[] = {
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
+ .config_intr = ip101a_g_config_intr,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
.suspend = genphy_suspend,
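
ip101a_g_config_intr() above is a typical phylib config_intr implementation: pick a register value from the requested interrupt state and write it. A userspace model under invented toy_* names, with toy_reg_write() standing in for phy_write() (register 0x11 mirrors IP101A_G_IRQ_CONF_STATUS):

#include <stdint.h>

#define TOY_IRQ_PIN_USED (1u << 15) /* like IP101A_G_IRQ_PIN_USED */
#define TOY_NO_IRQ       (1u << 11) /* like IP101A_G_NO_IRQ */

enum toy_irq_state { TOY_IRQ_DISABLED, TOY_IRQ_ENABLED };

/* Stand-in for phy_write(); a real driver would hit the MDIO bus here. */
static int toy_reg_write(uint16_t reg, uint16_t val)
{
    (void)reg;
    (void)val;
    return 0;
}

/* Mirror of the config_intr flow: one register, two possible values. */
static int toy_config_intr(enum toy_irq_state state)
{
    uint16_t val = (state == TOY_IRQ_ENABLED) ? TOY_IRQ_PIN_USED : TOY_NO_IRQ;

    return toy_reg_write(0x11, val); /* 0x11: conf/status register */
}

int main(void)
{
    return toy_config_intr(TOY_IRQ_ENABLED);
}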
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 487fa5e0e165..97b4233120e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -845,6 +845,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
+ TC_SETUP_ROOT_QDISC,
};
/* These structures hold the attributes of bpf state that are being passed
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index f6c0cd29dea4..c497ada7f591 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -821,12 +821,21 @@ enum tc_mq_command {
TC_MQ_CREATE,
TC_MQ_DESTROY,
TC_MQ_STATS,
+ TC_MQ_GRAFT,
+};
+
+struct tc_mq_opt_offload_graft_params {
+ unsigned long queue;
+ u32 child_handle;
};
struct tc_mq_qopt_offload {
enum tc_mq_command command;
u32 handle;
- struct tc_qopt_offload_stats stats;
+ union {
+ struct tc_qopt_offload_stats stats;
+ struct tc_mq_opt_offload_graft_params graft_params;
+ };
};
enum tc_red_command {
@@ -834,12 +843,14 @@ enum tc_red_command {
TC_RED_DESTROY,
TC_RED_STATS,
TC_RED_XSTATS,
+ TC_RED_GRAFT,
};
struct tc_red_qopt_offload_params {
u32 min;
u32 max;
u32 probability;
+ u32 limit;
bool is_ecn;
bool is_harddrop;
struct gnet_stats_queue *qstats;
@@ -853,6 +864,7 @@ struct tc_red_qopt_offload {
struct tc_red_qopt_offload_params set;
struct tc_qopt_offload_stats stats;
struct red_stats *xstats;
+ u32 child_handle;
};
};
@@ -889,4 +901,14 @@ struct tc_prio_qopt_offload {
};
};
+enum tc_root_command {
+ TC_ROOT_GRAFT,
+};
+
+struct tc_root_qopt_offload {
+ enum tc_root_command command;
+ u32 handle;
+ bool ingress;
+};
+
#endif
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f55bc50cd0a9..9c88cec7e8a2 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -860,6 +860,21 @@ void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);
+static void qdisc_offload_graft_root(struct net_device *dev,
+ struct Qdisc *new, struct Qdisc *old,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_root_qopt_offload graft_offload = {
+ .command = TC_ROOT_GRAFT,
+ .handle = new ? new->handle : 0,
+ .ingress = (new && new->flags & TCQ_F_INGRESS) ||
+ (old && old->flags & TCQ_F_INGRESS),
+ };
+
+ qdisc_offload_graft_helper(dev, NULL, new, old,
+ TC_SETUP_ROOT_QDISC, &graft_offload, extack);
+}
+
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event)
{
@@ -1026,6 +1041,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (dev->flags & IFF_UP)
dev_deactivate(dev);
+ qdisc_offload_graft_root(dev, new, old, extack);
+
if (new && new->ops->attach)
goto skip;
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 1db5c1bf6ddd..203659bc3906 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -193,6 +193,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+ struct tc_mq_qopt_offload graft_offload;
struct net_device *dev = qdisc_dev(sch);
if (dev->flags & IFF_UP)
@@ -203,6 +204,14 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
+
+ graft_offload.handle = sch->handle;
+ graft_offload.graft_params.queue = cl - 1;
+ graft_offload.graft_params.child_handle = new ? new->handle : 0;
+ graft_offload.command = TC_MQ_GRAFT;
+
+ qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
+ TC_SETUP_QDISC_MQ, &graft_offload, extack);
return 0;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a1d08bdd9357..9df9942340ea 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -166,6 +166,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
opt.set.min = q->parms.qth_min >> q->parms.Wlog;
opt.set.max = q->parms.qth_max >> q->parms.Wlog;
opt.set.probability = q->parms.max_P;
+ opt.set.limit = q->limit;
opt.set.is_ecn = red_use_ecn(q);
opt.set.is_harddrop = red_use_harddrop(q);
opt.set.qstats = &sch->qstats;
@@ -367,6 +368,21 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
return 0;
}
+static void red_graft_offload(struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_red_qopt_offload graft_offload = {
+ .handle = sch->handle,
+ .parent = sch->parent,
+ .child_handle = new->handle,
+ .command = TC_RED_GRAFT,
+ };
+
+ qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
+ TC_SETUP_QDISC_RED, &graft_offload, extack);
+}
+
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
@@ -376,6 +392,8 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new = &noop_qdisc;
*old = qdisc_replace(sch, new, &q->qdisc);
+
+ red_graft_offload(sch, new, *old, extack);
return 0;
}
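
red_graft() substitutes &noop_qdisc for a NULL child before notifying the offload, so drivers always see a concrete handle, including on child removal. A standalone sketch of that fallback (the names and the handle constant are invented):

#include <stdint.h>

#define TOY_NOOP_HANDLE 0xffff0000u /* stand-in for noop_qdisc's handle */

/* A real driver would update its qdisc tree here. */
static void toy_notify_graft(uint32_t parent, uint32_t child)
{
    (void)parent;
    (void)child;
}

/* NULL child collapses to the noop handle before the notification,
 * so removal is reported the same way as any other graft. */
static void toy_red_graft(uint32_t parent, const uint32_t *new_child)
{
    toy_notify_graft(parent, new_child ? *new_child : TOY_NOOP_HANDLE);
}

int main(void)
{
    toy_red_graft(0x10000, NULL);
    return 0;
}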