Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_ethtool.c')
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_ethtool.c | 839
1 file changed, 835 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index eff37a6c0afa..ac98f1d96892 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -2,10 +2,120 @@
/* Copyright (c) 2018 Intel Corporation */
/* ethtool support for igc */
+#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include "igc.h"
+/* statistics table entry: ethtool string plus size/offset of the counter */
+struct igc_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGC_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \
+ .stat_offset = offsetof(struct igc_adapter, _stat) \
+}
+
+static const struct igc_stats igc_gstrings_stats[] = {
+ IGC_STAT("rx_packets", stats.gprc),
+ IGC_STAT("tx_packets", stats.gptc),
+ IGC_STAT("rx_bytes", stats.gorc),
+ IGC_STAT("tx_bytes", stats.gotc),
+ IGC_STAT("rx_broadcast", stats.bprc),
+ IGC_STAT("tx_broadcast", stats.bptc),
+ IGC_STAT("rx_multicast", stats.mprc),
+ IGC_STAT("tx_multicast", stats.mptc),
+ IGC_STAT("multicast", stats.mprc),
+ IGC_STAT("collisions", stats.colc),
+ IGC_STAT("rx_crc_errors", stats.crcerrs),
+ IGC_STAT("rx_no_buffer_count", stats.rnbc),
+ IGC_STAT("rx_missed_errors", stats.mpc),
+ IGC_STAT("tx_aborted_errors", stats.ecol),
+ IGC_STAT("tx_carrier_errors", stats.tncrs),
+ IGC_STAT("tx_window_errors", stats.latecol),
+ IGC_STAT("tx_abort_late_coll", stats.latecol),
+ IGC_STAT("tx_deferred_ok", stats.dc),
+ IGC_STAT("tx_single_coll_ok", stats.scc),
+ IGC_STAT("tx_multi_coll_ok", stats.mcc),
+ IGC_STAT("tx_timeout_count", tx_timeout_count),
+ IGC_STAT("rx_long_length_errors", stats.roc),
+ IGC_STAT("rx_short_length_errors", stats.ruc),
+ IGC_STAT("rx_align_errors", stats.algnerrc),
+ IGC_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGC_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGC_STAT("tx_flow_control_xon", stats.xontxc),
+ IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGC_STAT("rx_long_byte_count", stats.gorc),
+ IGC_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGC_STAT("tx_smbus", stats.mgptc),
+ IGC_STAT("rx_smbus", stats.mgprc),
+ IGC_STAT("dropped_smbus", stats.mgpdc),
+ IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+};
+
+#define IGC_NETDEV_STAT(_net_stat) { \
+ .stat_string = __stringify(_net_stat), \
+ .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+}
+
+static const struct igc_stats igc_gstrings_net_stats[] = {
+ IGC_NETDEV_STAT(rx_errors),
+ IGC_NETDEV_STAT(tx_errors),
+ IGC_NETDEV_STAT(tx_dropped),
+ IGC_NETDEV_STAT(rx_length_errors),
+ IGC_NETDEV_STAT(rx_over_errors),
+ IGC_NETDEV_STAT(rx_frame_errors),
+ IGC_NETDEV_STAT(rx_fifo_errors),
+ IGC_NETDEV_STAT(tx_fifo_errors),
+ IGC_NETDEV_STAT(tx_heartbeat_errors)
+};
+
+enum igc_diagnostics_results {
+ TEST_REG = 0,
+ TEST_EEP,
+ TEST_IRQ,
+ TEST_LOOP,
+ TEST_LINK
+};
+
+static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
+ [TEST_REG] = "Register test (offline)",
+ [TEST_EEP] = "Eeprom test (offline)",
+ [TEST_IRQ] = "Interrupt test (offline)",
+ [TEST_LOOP] = "Loopback test (offline)",
+ [TEST_LINK] = "Link test (on/offline)"
+};
+
+#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN)
+
+#define IGC_GLOBAL_STATS_LEN \
+ (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats))
+#define IGC_NETDEV_STATS_LEN \
+ (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats))
+#define IGC_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igc_rx_queue_stats) / sizeof(u64))
+#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+#define IGC_QUEUE_STATS_LEN \
+ ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGC_RX_QUEUE_STATS_LEN) + \
+ (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \
+ IGC_TX_QUEUE_STATS_LEN))
+#define IGC_STATS_LEN \
+ (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+
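The IGC_STAT and IGC_NETDEV_STAT macros above build a table of (name, size, offset) triples so that one generic loop can copy arbitrary counters out of struct igc_adapter or struct rtnl_link_stats64 into the ethtool data array. A minimal userspace sketch of the same offsetof()-based pattern; the struct and field names here are illustrative, not taken from the driver:

/* illustrative stand-in for the driver's adapter stats */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint32_t tx_timeouts;
};

struct demo_stat {
	const char *name;
	int size;	/* sizeof() the field, like FIELD_SIZEOF() */
	int offset;	/* offsetof() the field within the struct */
};

#define DEMO_STAT(_name, _field) {					\
	.name = _name,							\
	.size = sizeof(((struct demo_adapter *)0)->_field),		\
	.offset = offsetof(struct demo_adapter, _field),		\
}

static const struct demo_stat demo_stats[] = {
	DEMO_STAT("rx_packets", rx_packets),
	DEMO_STAT("tx_packets", tx_packets),
	DEMO_STAT("tx_timeouts", tx_timeouts),
};

int main(void)
{
	struct demo_adapter a = { .rx_packets = 10, .tx_packets = 7, .tx_timeouts = 1 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const char *p = (const char *)&a + demo_stats[i].offset;
		/* same u64-vs-u32 dispatch igc_get_ethtool_stats() performs */
		uint64_t val = demo_stats[i].size == sizeof(uint64_t) ?
			       *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%-12s %llu\n", demo_stats[i].name,
		       (unsigned long long)val);
	}
	return 0;
}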
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
@@ -545,6 +655,127 @@ static int igc_set_pauseparam(struct net_device *netdev,
return retval;
}
+static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igc_gstrings_test,
+ IGC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
+ }
+ /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, igc_priv_flags_strings,
+ IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int igc_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IGC_STATS_LEN;
+ case ETH_SS_TEST:
+ return IGC_TEST_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IGC_PRIV_FLAGS_STR_LEN;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static void igc_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ unsigned int start;
+ struct igc_ring *ring;
+ int i, j;
+ char *p;
+
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + igc_gstrings_stats[i].stat_offset;
+ data[i] = (igc_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) {
+ p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset;
+ data[i] = (igc_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ u64 restart2;
+
+ ring = adapter->tx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ data[i] = ring->tx_stats.packets;
+ data[i + 1] = ring->tx_stats.bytes;
+ data[i + 2] = ring->tx_stats.restart_queue;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ restart2 = ring->tx_stats.restart_queue2;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ data[i + 2] += restart2;
+
+ i += IGC_TX_QUEUE_STATS_LEN;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ data[i] = ring->rx_stats.packets;
+ data[i + 1] = ring->rx_stats.bytes;
+ data[i + 2] = ring->rx_stats.drops;
+ data[i + 3] = ring->rx_stats.csum_err;
+ data[i + 4] = ring->rx_stats.alloc_failed;
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ i += IGC_RX_QUEUE_STATS_LEN;
+ }
+ spin_unlock(&adapter->stats64_lock);
+}
+
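igc_get_sset_count(), igc_get_strings() and igc_get_ethtool_stats() together implement the three-step handshake that `ethtool -S <dev>` performs: query the count, fetch the string set, then fetch the u64 array in the same order (the per-ring counters are sampled inside u64_stats_fetch_begin/retry loops so 64-bit values stay consistent on 32-bit hosts). A userspace sketch of that handshake over the standard SIOCETHTOOL ioctl; the default device name is illustrative and error handling is minimal:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "eth0";	/* illustrative default */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr = {};
	unsigned int i, n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

	/* step 1: number of stats, as reported via get_sset_count() */
	ifr.ifr_data = (void *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;
	n = drvinfo.n_stats;

	strings = calloc(1, sizeof(*strings) + (size_t)n * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + (size_t)n * sizeof(__u64));
	if (!strings || !stats)
		return 1;

	/* step 2: names, in the exact order get_strings() emits them */
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ifr.ifr_data = (void *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;

	/* step 3: values, same order as the names */
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;

	for (i = 0; i < n; i++)
		printf("%-32.32s %llu\n",
		       (char *)strings->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);
	return 0;
}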
static int igc_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -643,6 +874,605 @@ static int igc_set_coalesce(struct net_device *netdev,
return 0;
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct igc_nfc_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ if (rule->filter.match_flags) {
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = rule->filter.etype;
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
+ }
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_nfc_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igc */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V4_FLOW:
+ /* Fall through */
+ case AH_ESP_V4_FLOW:
+ /* Fall through */
+ case AH_V4_FLOW:
+ /* Fall through */
+ case ESP_V4_FLOW:
+ /* Fall through */
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V6_FLOW:
+ /* Fall through */
+ case AH_ESP_V6_FLOW:
+ /* Fall through */
+ case AH_V6_FLOW:
+ /* Fall through */
+ case ESP_V6_FLOW:
+ /* Fall through */
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->nfc_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = igc_get_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs);
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igc_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGC_FLAG_RSS_FIELD_IPV6_UDP)
+static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP |
+ IGC_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(IGC_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
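igc_set_rss_hash_opt() backs ethtool's rx-flow-hash configuration; only the L4-port contribution for UDP is actually tunable, everything else must stay at the defaults that igc_get_rss_hash_opts() reports. Assuming standard ethtool syntax, enabling UDP/IPv4 port hashing would look like `ethtool -N <dev> rx-flow-hash udp4 sdfn` (s/d select the IP addresses, f/n the two L4 port halves), and `ethtool -n <dev> rx-flow-hash udp4` reads the current setting back through the ETHTOOL_GRXFH path above.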
+static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 i;
+ u32 etqf;
+ u16 etype;
+
+ /* find an empty etype filter register */
+ for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
+ if (!adapter->etype_bitmap[i])
+ break;
+ }
+ if (i == MAX_ETYPE_FILTER) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
+ return -EINVAL;
+ }
+
+ adapter->etype_bitmap[i] = true;
+
+ etqf = rd32(IGC_ETQF(i));
+ etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
+
+ etqf |= IGC_ETQF_FILTER_ENABLE;
+ etqf &= ~IGC_ETQF_ETYPE_MASK;
+ etqf |= (etype & IGC_ETQF_ETYPE_MASK);
+
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT)
+ & IGC_ETQF_QUEUE_MASK);
+ etqf |= IGC_ETQF_QUEUE_ENABLE;
+
+ wr32(IGC_ETQF(i), etqf);
+
+ input->etype_reg_index = i;
+
+ return 0;
+}
+
+static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u16 queue_index;
+ u32 vlapqf;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK;
+
+ /* check whether this vlan prio is already set */
+ if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) &&
+ queue_index != input->action) {
+ dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
+ return -EEXIST;
+ }
+
+ vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
+
+ wr32(IGC_VLAPQF, vlapqf);
+
+ return 0;
+}
+
+int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err = -EINVAL;
+
+ if (hw->mac.type == igc_i225 &&
+ !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) {
+ dev_err(&adapter->pdev->dev,
+ "i225 doesn't support flow classification rules specifying only source addresses.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ err = igc_rxnfc_write_etype_filter(adapter, input);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.dst_addr,
+ input->action, 0);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ err = igc_rxnfc_write_vlan_prio_filter(adapter, input);
+
+ return err;
+}
+
+static void igc_clear_etype_filter_regs(struct igc_adapter *adapter,
+ u16 reg_index)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 etqf = rd32(IGC_ETQF(reg_index));
+
+ etqf &= ~IGC_ETQF_QUEUE_ENABLE;
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf &= ~IGC_ETQF_FILTER_ENABLE;
+
+ wr32(IGC_ETQF(reg_index), etqf);
+
+ adapter->etype_bitmap[reg_index] = false;
+}
+
+static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter,
+ u16 vlan_tci)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u32 vlapqf;
+
+ vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority,
+ IGC_VLAPQF_QUEUE_MASK);
+
+ wr32(IGC_VLAPQF, vlapqf);
+}
+
+int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
+ igc_clear_etype_filter_regs(adapter,
+ input->etype_reg_index);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ igc_clear_vlan_prio_filter(adapter,
+ ntohs(input->filter.vlan_tci));
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.dst_addr,
+ input->action, 0);
+
+ return 0;
+}
+
+static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input,
+ u16 sw_idx)
+{
+ struct igc_nfc_filter *rule, *parent;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = rule;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && rule->sw_idx == sw_idx) {
+ if (!input)
+ err = igc_erase_filter(adapter, rule);
+
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ adapter->nfc_filter_count--;
+ }
+
+ /* If no input this was a delete, err should be 0 if a rule was
+ * successfully found and removed from the list else -EINVAL
+ */
+ if (!input)
+ return err;
+
+ /* initialize node */
+ INIT_HLIST_NODE(&input->nfc_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->nfc_node, &parent->nfc_node);
+ else
+ hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
+
+ /* update counts */
+ adapter->nfc_filter_count++;
+
+ return 0;
+}
+
+static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct igc_nfc_filter *input, *rule;
+ int err = 0;
+
+ if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+	/* Don't allow programming if the action is a drop (not supported)
+	 * or a queue index beyond the number of Rx queues.
+	 */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie >= adapter->num_rx_queues) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
+ return -EINVAL;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= IGC_MAX_RXNFC_FILTERS) {
+ dev_err(&adapter->pdev->dev, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ return -EINVAL;
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ input->filter.etype = fsp->h_u.ether_spec.h_proto;
+ input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
+ }
+
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ input->filter.vlan_tci = fsp->h_ext.vlan_tci;
+ input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
+ }
+
+ input->action = fsp->ring_cookie;
+ input->sw_idx = fsp->location;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&input->filter, &rule->filter,
+ sizeof(input->filter))) {
+ err = -EEXIST;
+ dev_err(&adapter->pdev->dev,
+ "ethtool: this filter is already set\n");
+ goto err_out_w_lock;
+ }
+ }
+
+ err = igc_add_filter(adapter, input);
+ if (err)
+ goto err_out_w_lock;
+
+ igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->nfc_lock);
+ return 0;
+
+err_out_w_lock:
+ spin_unlock(&adapter->nfc_lock);
+err_out:
+ kfree(input);
+ return err;
+}
+
+static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+ err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
+
+static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igc_set_rss_hash_opt(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = igc_add_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+		ret = igc_del_ethtool_nfc_entry(adapter, cmd);
+		break;
+	default:
+ break;
+ }
+
+ return ret;
+}
+
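igc_set_rxnfc()/igc_get_rxnfc() hook these filters into the standard rxnfc ioctls, so the rules are normally driven from `ethtool -N` / `ethtool -n`. As a sketch, here is the rough equivalent of `ethtool -N <dev> flow-type ether proto 0x88f7 action 1 loc 0` (steer the PTP EtherType to Rx queue 1) issued directly through ETHTOOL_SRXCLSRLINS; the device name, queue and rule location are illustrative, and the queue must exist for igc_add_ethtool_nfc_entry() to accept the rule:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {};
	struct ifreq ifr = {};
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative device */

	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	/* match EtherType 0x88f7 with a full mask, as the driver requires */
	nfc.fs.h_u.ether_spec.h_proto = htons(0x88f7);
	nfc.fs.m_u.ether_spec.h_proto = 0xffff;
	nfc.fs.ring_cookie = 1;	/* steer to Rx queue 1 (must be < num_rx_queues) */
	nfc.fs.location = 0;	/* rule index, must be < IGC_MAX_RXNFC_FILTERS */

	ifr.ifr_data = (void *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr)) {
		perror("ETHTOOL_SRXCLSRLINS");
		return 1;
	}
	return 0;
}

Rules added this way should be listed by `ethtool -n <dev>` (ETHTOOL_GRXCLSRLALL) and removed with `ethtool -N <dev> delete 0` (ETHTOOL_SRXCLSRLDEL), assuming standard ethtool behaviour.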
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -885,17 +1715,13 @@ static int igc_get_link_ksettings(struct net_device *netdev,
if (hw->mac.type == igc_i225 &&
(status & IGC_STATUS_SPEED_2500)) {
speed = SPEED_2500;
- hw_dbg("2500 Mbs, ");
} else {
speed = SPEED_1000;
- hw_dbg("1000 Mbs, ");
}
} else if (status & IGC_STATUS_SPEED_100) {
speed = SPEED_100;
- hw_dbg("100 Mbs, ");
} else {
speed = SPEED_10;
- hw_dbg("10 Mbs, ");
}
if ((status & IGC_STATUS_FD) ||
hw->phy.media_type != igc_media_type_copper)
@@ -1011,8 +1837,13 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_ringparam = igc_set_ringparam,
.get_pauseparam = igc_get_pauseparam,
.set_pauseparam = igc_set_pauseparam,
+ .get_strings = igc_get_strings,
+ .get_sset_count = igc_get_sset_count,
+ .get_ethtool_stats = igc_get_ethtool_stats,
.get_coalesce = igc_get_coalesce,
.set_coalesce = igc_set_coalesce,
+ .get_rxnfc = igc_get_rxnfc,
+ .set_rxnfc = igc_set_rxnfc,
.get_rxfh_indir_size = igc_get_rxfh_indir_size,
.get_rxfh = igc_get_rxfh,
.set_rxfh = igc_set_rxfh,