Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1004
1 file changed, 710 insertions(+), 294 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a9873ec28c7..bb4612c159fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -24,12 +24,24 @@
*
******************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
#include <net/vxlan.h>
#endif
+#if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+#endif
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -38,8 +50,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 46
+#define DRV_VERSION_MINOR 4
+#define DRV_VERSION_BUILD 8
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -55,6 +67,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -68,7 +82,6 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
@@ -790,75 +803,6 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
#endif
/**
- * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
- * @pf: the corresponding PF
- *
- * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
- **/
-static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
-{
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
- struct i40e_hw_port_stats *nsd = &pf->stats;
- struct i40e_hw *hw = &pf->hw;
- u64 xoff = 0;
-
- if ((hw->fc.current_mode != I40E_FC_FULL) &&
- (hw->fc.current_mode != I40E_FC_RX_PAUSE))
- return;
-
- xoff = nsd->link_xoff_rx;
- i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_rx, &nsd->link_xoff_rx);
-
- /* No new LFC xoff rx */
- if (!(nsd->link_xoff_rx - xoff))
- return;
-
-}
-
-/**
- * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
- * @pf: the corresponding PF
- *
- * Update the Rx XOFF counter (PAUSE frames) in PFC mode
- **/
-static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
-{
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
- struct i40e_hw_port_stats *nsd = &pf->stats;
- bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
- struct i40e_dcbx_config *dcb_cfg;
- struct i40e_hw *hw = &pf->hw;
- u16 i;
- u8 tc;
-
- dcb_cfg = &hw->local_dcbx_config;
-
- /* Collect Link XOFF stats when PFC is disabled */
- if (!dcb_cfg->pfc.pfcenable) {
- i40e_update_link_xoff_rx(pf);
- return;
- }
-
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- u64 prio_xoff = nsd->priority_xoff_rx[i];
-
- i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xoff_rx[i],
- &nsd->priority_xoff_rx[i]);
-
- /* No new PFC xoff rx */
- if (!(nsd->priority_xoff_rx[i] - prio_xoff))
- continue;
- /* Get the TC for given priority */
- tc = dcb_cfg->etscfg.prioritytable[i];
- xoff[tc] = true;
- }
-}
-
-/**
* i40e_update_vsi_stats - Update the vsi statistics counters.
* @vsi: the VSI to be updated
*
@@ -881,6 +825,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
+ u64 tx_force_wb;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -899,7 +844,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
*/
rx_b = rx_p = 0;
tx_b = tx_p = 0;
- tx_restart = tx_busy = tx_linearize = 0;
+ tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -917,6 +862,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
+ tx_force_wb += p->tx_stats.tx_force_wb;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -934,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
+ vsi->tx_force_wb = tx_force_wb;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
@@ -1049,12 +996,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
pf->stat_offsets_loaded,
&osd->link_xon_tx, &nsd->link_xon_tx);
- i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
pf->stat_offsets_loaded,
&osd->link_xoff_tx, &nsd->link_xoff_tx);
for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_rx[i],
+ &nsd->priority_xoff_rx[i]);
i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
pf->stat_offsets_loaded,
&osd->priority_xon_rx[i],
@@ -1317,6 +1270,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}
/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f = NULL;
+ int changed = 0;
+
+ WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+ "Missing mac_filter_list_lock\n");
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (is_vf == f->is_vf) &&
+ (is_netdev == f->is_netdev)) {
+ f->counter--;
+ f->changed = true;
+ changed = 1;
+ }
+ }
+ if (changed) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ return 0;
+ }
+ return -ENOENT;
+}
+
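
The new i40e_del_mac_all_vlan() above does not touch hardware directly; it only drops each matching filter's use counter and marks the entry changed, leaving the actual AdminQ work to the filter-sync path. A rough user-space model of that deferred bookkeeping (field and function names below only loosely mirror the driver and are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch of the shared-filter bookkeeping; not driver code. */
struct demo_mac_filter {
	int  counter;	/* number of users of this filter */
	bool changed;	/* needs to be pushed to / removed from HW */
};

static void demo_drop_filter_ref(struct demo_mac_filter *f)
{
	f->counter--;
	f->changed = true;	/* sync path will act on it later */
}

int main(void)
{
	struct demo_mac_filter f = { .counter = 2 };

	demo_drop_filter_ref(&f);
	demo_drop_filter_ref(&f);
	printf("counter=%d -> %s on next filter sync\n", f.counter,
	       f.counter <= 0 ? "delete from HW" : "keep");
	return 0;
}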
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
@@ -1547,10 +1536,9 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
- i40e_sync_vsi_filters(vsi, false);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
- return 0;
+ return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1590,7 +1578,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
/* Find numtc from enabled TC bitmap */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
+ if (enabled_tc & BIT(i)) /* TC is enabled */
numtc++;
}
if (!numtc) {
@@ -1619,13 +1607,14 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
- if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+ if (vsi->tc_config.enabled_tc & BIT(i)) {
/* TC is enabled */
int pow, num_qps;
switch (vsi->type) {
case I40E_VSI_MAIN:
- qcount = min_t(int, pf->rss_size, num_tc_qps);
+ qcount = min_t(int, pf->alloc_rss_size,
+ num_tc_qps);
break;
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
@@ -1851,13 +1840,12 @@ static void i40e_cleanup_add_list(struct list_head *add_list)
/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
- * @grab_rtnl: whether RTNL needs to be grabbed
*
* Push any outstanding VSI filter changes through the AdminQ.
*
* Returns 0 or error value
**/
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
struct list_head tmp_del_list, tmp_add_list;
struct i40e_mac_filter *f, *ftmp, *fclone;
@@ -1865,8 +1853,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
bool add_happened = false;
int filter_list_len = 0;
u32 changed_flags = 0;
+ i40e_status aq_ret = 0;
bool err_cond = false;
- i40e_status ret = 0;
+ int retval = 0;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
@@ -1929,17 +1918,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (err_cond)
+ if (err_cond) {
i40e_cleanup_add_list(&tmp_add_list);
+ retval = -ENOMEM;
+ goto out;
+ }
}
/* Now process 'del_list' outside the lock */
if (!list_empty(&tmp_del_list)) {
+ int del_list_size;
+
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
- GFP_KERNEL);
+ del_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kzalloc(del_list_size, GFP_KERNEL);
if (!del_list) {
i40e_cleanup_add_list(&tmp_add_list);
@@ -1948,7 +1942,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
i40e_undo_del_filter_entries(vsi, &tmp_del_list);
i40e_undo_add_filter_entries(vsi);
spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto out;
}
list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
@@ -1966,18 +1961,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
/* flush a full buffer */
if (num_del == filter_list_len) {
- ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid, del_list, num_del,
- NULL);
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw,
+ vsi->seid,
+ del_list,
+ num_del,
+ NULL);
aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- memset(del_list, 0, sizeof(*del_list));
+ memset(del_list, 0, del_list_size);
- if (ret && aq_err != I40E_AQ_RC_ENOENT)
+ if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
+ retval = -EIO;
dev_err(&pf->pdev->dev,
"ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
- i40e_stat_str(&pf->hw, ret),
+ i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+ }
}
/* Release memory for MAC filter entries which were
* synced up with HW.
@@ -1987,15 +1986,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}
if (num_del) {
- ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
- del_list, num_del, NULL);
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ del_list, num_del,
+ NULL);
aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- if (ret && aq_err != I40E_AQ_RC_ENOENT)
+ if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
}
@@ -2004,13 +2004,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}
if (!list_empty(&tmp_add_list)) {
+ int add_list_size;
/* do all the adds now */
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data),
- add_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_add_macvlan_element_data),
- GFP_KERNEL);
+ add_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kzalloc(add_list_size, GFP_KERNEL);
if (!add_list) {
/* Purge element from temporary lists */
i40e_cleanup_add_list(&tmp_add_list);
@@ -2019,7 +2020,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_undo_add_filter_entries(vsi);
spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto out;
}
list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
@@ -2040,15 +2042,15 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
/* flush a full buffer */
if (num_add == filter_list_len) {
- ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add,
- NULL);
+ aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add,
+ NULL);
aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
- if (ret)
+ if (aq_ret)
break;
- memset(add_list, 0, sizeof(*add_list));
+ memset(add_list, 0, add_list_size);
}
/* Entries from tmp_add_list were cloned from MAC
* filter list, hence clean those cloned entries
@@ -2058,18 +2060,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}
if (num_add) {
- ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add, NULL);
+ aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
}
kfree(add_list);
add_list = NULL;
- if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
+ if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
+ retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
dev_info(&pf->pdev->dev,
"add filter failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
!test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
@@ -2087,16 +2090,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
bool cur_multipromisc;
cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
- ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_multipromisc,
- NULL);
- if (ret)
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set multi promisc failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
+ }
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
bool cur_promisc;
@@ -2112,44 +2118,50 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- if (grab_rtnl)
- i40e_do_reset_safe(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
- else
- i40e_do_reset(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
} else {
- ret = i40e_aq_set_vsi_unicast_promiscuous(
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
&vsi->back->hw,
vsi->seid,
cur_promisc, NULL);
- if (ret)
+ if (aq_ret) {
+ retval =
+ i40e_aq_rc_to_posix(aq_ret,
+ pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set unicast promisc failed, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
- ret = i40e_aq_set_vsi_multicast_promiscuous(
+ aq_ret, pf->hw.aq.asq_last_status);
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
&vsi->back->hw,
vsi->seid,
cur_promisc, NULL);
- if (ret)
+ if (aq_ret) {
+ retval =
+ i40e_aq_rc_to_posix(aq_ret,
+ pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set multicast promisc failed, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ aq_ret, pf->hw.aq.asq_last_status);
+ }
}
- ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (ret)
+ aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
+ }
}
-
+out:
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
- return 0;
+ return retval;
}
/**
@@ -2166,8 +2178,15 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
- i40e_sync_vsi_filters(pf->vsi[v], true);
+ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+ int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+
+ if (ret) {
+ /* come back and try again later */
+ pf->flags |= I40E_FLAG_FILTER_SYNC;
+ break;
+ }
+ }
}
}
@@ -2377,16 +2396,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- /* Make sure to release before sync_vsi_filter because that
- * function will lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}
/**
@@ -2459,16 +2475,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- /* Make sure to release before sync_vsi_filter because that
- * function with lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}
/**
@@ -2711,6 +2724,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
free_cpumask_var(mask);
}
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
}
/**
@@ -4360,17 +4378,41 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+ /* Bail out if interrupts are disabled because napi_poll
+ * execution in-progress or will get scheduled soon.
+ * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
+ */
+ if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
+ return;
+
head = i40e_get_head(tx_ring);
tx_pending = i40e_get_tx_pending(tx_ring);
- /* Interrupts are disabled and TX pending is non-zero,
- * trigger the SW interrupt (don't wait). Worst case
- * there will be one extra interrupt which may result
- * into not cleaning any queues because queues are cleaned.
+ /* HW has finished executing descriptors and updated the HEAD
+ * write-back, but SW hasn't processed those descriptors yet. If no
+ * interrupt is generated from this point on, dev_watchdog could
+ * detect a timeout on those netdev_queues, so proactively trigger
+ * a SW interrupt.
*/
- if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
- i40e_force_wb(vsi, tx_ring->q_vector);
+ if (tx_pending) {
+ /* NAPI Poll didn't run and clear since it was set */
+ if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
+ &tx_ring->q_vector->hung_detected)) {
+ netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
+ vsi->seid, q_idx, tx_pending,
+ tx_ring->next_to_clean, head,
+ tx_ring->next_to_use,
+ readl(tx_ring->tail));
+ netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
+ vsi->seid, q_idx, val);
+ i40e_force_wb(vsi, tx_ring->q_vector);
+ } else {
+ /* First Chance - detected possible hung */
+ set_bit(I40E_Q_VECTOR_HUNG_DETECT,
+ &tx_ring->q_vector->hung_detected);
+ }
+ }
}
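
The change above turns hung-queue handling into a two-pass scheme: the first pass that sees pending TX work with interrupts disabled only sets a per-vector "hung" bit, and the force-writeback fires only if that bit is still set on the next detection round (NAPI clears it when it makes progress). A minimal sketch of that first-chance/second-chance pattern, using plain booleans instead of the driver's atomic bitops and illustrative names throughout:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch of the two-pass hung-queue check; not driver code. */
struct demo_q_vector {
	bool hung_suspect;	/* stands in for I40E_Q_VECTOR_HUNG_DETECT */
};

static void demo_force_wb(void)
{
	printf("second pass: queue still stuck, forcing descriptor writeback\n");
}

static void demo_detect_hung_queue(struct demo_q_vector *qv, int tx_pending,
				   bool irq_enabled)
{
	if (irq_enabled || !tx_pending) {
		qv->hung_suspect = false;	/* progress expected; reset */
		return;
	}
	if (qv->hung_suspect) {
		qv->hung_suspect = false;
		demo_force_wb();
	} else {
		qv->hung_suspect = true;	/* first chance: just flag it */
	}
}

int main(void)
{
	struct demo_q_vector qv = { false };

	demo_detect_hung_queue(&qv, 4, false);	/* first pass: flag only */
	demo_detect_hung_queue(&qv, 4, false);	/* second pass: force wb */
	return 0;
}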
/**
@@ -4441,7 +4483,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
if (app.selector == I40E_APP_SEL_TCPIP &&
app.protocolid == I40E_APP_PROTOID_ISCSI) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
- enabled_tc |= BIT_ULL(tc);
+ enabled_tc |= BIT(tc);
break;
}
}
@@ -4525,7 +4567,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT_ULL(i))
+ if (enabled_tc & BIT(i))
num_tc++;
}
return num_tc;
@@ -4547,7 +4589,7 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
/* Find the first enabled TC */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT_ULL(i))
+ if (enabled_tc & BIT(i))
break;
}
@@ -4707,7 +4749,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
* will set the numtc for netdev as 2 that will be
* referenced by the netdev layer as TC 0 and 1.
*/
- if (vsi->tc_config.enabled_tc & BIT_ULL(i))
+ if (vsi->tc_config.enabled_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_config.tc_info[i].netdev_tc,
vsi->tc_config.tc_info[i].qcount,
@@ -4769,7 +4811,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT_ULL(i))
+ if (enabled_tc & BIT(i))
bw_share[i] = 1;
}
@@ -4843,7 +4885,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT_ULL(i))
+ if (enabled_tc & BIT(i))
bw_data.tc_bw_share_credits[i] = 1;
}
@@ -5240,7 +5282,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
/* Generate TC map for number of tc requested */
for (i = 0; i < tc; i++)
- enabled_tc |= BIT_ULL(i);
+ enabled_tc |= BIT(i);
/* Requesting same TC configuration as already enabled */
if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -5305,6 +5347,9 @@ int i40e_open(struct net_device *netdev)
#ifdef CONFIG_I40E_VXLAN
vxlan_get_rx_port(netdev);
#endif
+#ifdef CONFIG_I40E_GENEVE
+ geneve_get_rx_port(netdev);
+#endif
return 0;
}
@@ -5738,7 +5783,7 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
**/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
- BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+ WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
@@ -6013,6 +6058,9 @@ static void i40e_link_event(struct i40e_pf *pf)
i40e_status status;
bool new_link, old_link;
+ /* save off old link status information */
+ pf->hw.phy.link_info_old = pf->hw.phy.link_info;
+
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
@@ -6101,23 +6149,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
- reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
+ reset_flags |= BIT(__I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
}
if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
- reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
+ reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
- reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
+ reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
- reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
+ reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
- reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
+ reset_flags |= BIT(__I40E_DOWN_REQUESTED);
clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
}
@@ -6147,13 +6195,9 @@ unlock:
static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_hw *hw = &pf->hw;
struct i40e_aqc_get_link_status *status =
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
- /* save off old link status information */
- hw->phy.link_info_old = hw->phy.link_info;
-
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
* This completely ignores any state information
@@ -6192,15 +6236,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val = rd32(&pf->hw, pf->hw.aq.arq.len);
oldval = val;
if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
- dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+ if (hw->debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
}
if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
- dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+ if (hw->debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
}
if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
- dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+ if (hw->debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
}
if (oldval != val)
@@ -6209,15 +6256,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val = rd32(&pf->hw, pf->hw.aq.asq.len);
oldval = val;
if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
- dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
}
if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
- dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
}
if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
- dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
}
if (oldval != val)
@@ -6268,6 +6318,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
case i40e_aqc_opc_nvm_erase:
case i40e_aqc_opc_nvm_update:
+ case i40e_aqc_opc_oem_post_update:
i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
break;
default:
@@ -6685,6 +6736,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
i40e_status ret;
+ u32 val;
u32 v;
/* Now we wait for GRST to settle out.
@@ -6823,6 +6875,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+#define I40E_REG_MSS 0x000E64DC
+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
+#define I40E_64BYTE_MSS 0x400000
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
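
The block above lowers the hardware's minimum-MSS field so small-MSS TSO requests no longer trip an MDD event and reset. As a quick check of the encoding (on the assumption that the minimum-MSS field occupies bits 25:16 of the register, so I40E_64BYTE_MSS == 64 << 16), here is the same read-modify-write in standalone form with a made-up register value:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch; DEMO_* mirror the defines added in the patch. */
#define DEMO_MSS_MIN_MASK  0x3FF0000u
#define DEMO_MSS_MIN_SHIFT 16
#define DEMO_64BYTE_MSS    0x400000u

int main(void)
{
	uint32_t val = 0x5A0000u;	/* pretend default: min MSS of 90 bytes */

	assert((DEMO_64BYTE_MSS >> DEMO_MSS_MIN_SHIFT) == 64);

	if ((val & DEMO_MSS_MIN_MASK) > DEMO_64BYTE_MSS) {
		val &= ~DEMO_MSS_MIN_MASK;	/* clear the min-MSS field */
		val |= DEMO_64BYTE_MSS;		/* allow a 64-byte MSS */
	}
	printf("min MSS now %u bytes\n",
	       (val & DEMO_MSS_MIN_MASK) >> DEMO_MSS_MIN_SHIFT);
	return 0;
}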
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
@@ -6984,30 +7050,30 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
-#ifdef CONFIG_I40E_VXLAN
/**
- * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
* @pf: board private structure
**/
-static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
__be16 port;
int i;
- if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+ if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
return;
- pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+ pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
- pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
- port = pf->vxlan_ports[i];
+ if (pf->pending_udp_bitmap & BIT_ULL(i)) {
+ pf->pending_udp_bitmap &= ~BIT_ULL(i);
+ port = pf->udp_ports[i].index;
if (port)
ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
- I40E_AQC_TUNNEL_TYPE_VXLAN,
+ pf->udp_ports[i].type,
NULL, NULL);
else
ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -7020,13 +7086,13 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
- pf->vxlan_ports[i] = 0;
+ pf->udp_ports[i].index = 0;
}
}
}
+#endif
}
-#endif
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -7051,8 +7117,8 @@ static void i40e_service_task(struct work_struct *work)
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
i40e_sync_filters_subtask(pf);
-#ifdef CONFIG_I40E_VXLAN
- i40e_sync_vxlan_filters_subtask(pf);
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
+ i40e_sync_udp_filters_subtask(pf);
#endif
i40e_clean_adminq_subtask(pf);
@@ -7282,6 +7348,23 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
}
/**
+ * i40e_clear_rss_config_user - clear the user configured RSS hash keys
+ * and lookup table
+ * @vsi: Pointer to VSI structure
+ */
+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
+{
+ if (!vsi)
+ return;
+
+ kfree(vsi->rss_hkey_user);
+ vsi->rss_hkey_user = NULL;
+
+ kfree(vsi->rss_lut_user);
+ vsi->rss_lut_user = NULL;
+}
+
+/**
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
**/
@@ -7318,6 +7401,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
i40e_vsi_free_arrays(vsi, true);
+ i40e_clear_rss_config_user(vsi);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
@@ -7780,7 +7864,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
* @vsi: vsi structure
* @seed: RSS hash seed
**/
-static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ u8 *lut, u16 lut_size)
{
struct i40e_aqc_get_set_rss_key_data rss_key;
struct i40e_pf *pf = vsi->back;
@@ -7833,43 +7918,57 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
u8 seed[I40E_HKEY_ARRAY_SIZE];
struct i40e_pf *pf = vsi->back;
+ u8 *lut;
+ int ret;
- netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
- vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+ if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
+ return 0;
- if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
- return i40e_config_rss_aq(vsi, seed);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
- return 0;
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
+ ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
+ kfree(lut);
+
+ return ret;
}
/**
- * i40e_config_rss_reg - Prepare for RSS if used
- * @pf: board private structure
+ * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
+ * @vsi: Pointer to vsi structure
* @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
**/
-static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
+static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
+ const u8 *lut, u16 lut_size)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- u32 *seed_dw = (u32 *)seed;
- u32 current_queue = 0;
- u32 lut = 0;
- int i, j;
+ u8 i;
/* Fill out hash function seed */
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+ if (seed) {
+ u32 *seed_dw = (u32 *)seed;
- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
- lut = 0;
- for (j = 0; j < 4; j++) {
- if (current_queue == vsi->rss_size)
- current_queue = 0;
- lut |= ((current_queue) << (8 * j));
- current_queue++;
- }
- wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+ }
+
+ if (lut) {
+ u32 *lut_dw = (u32 *)lut;
+
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
}
i40e_flush(hw);
@@ -7877,18 +7976,101 @@ static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
}
/**
- * i40e_config_rss - Prepare for RSS if used
+ * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
+ u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 i;
+
+ if (seed) {
+ u32 *seed_dw = (u32 *)seed;
+
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i));
+ }
+ if (lut) {
+ u32 *lut_dw = (u32 *)lut;
+
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss - Configure RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(vsi, seed, lut, lut_size);
+ else
+ return i40e_config_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_get_rss - Get RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ return i40e_get_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_fill_rss_lut - Fill the RSS lookup table with default values
+ * @pf: Pointer to board private structure
+ * @lut: Lookup table
+ * @rss_table_size: Lookup table size
+ * @rss_size: Range of queue number for hashing
+ */
+static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size)
+{
+ u16 i;
+
+ for (i = 0; i < rss_table_size; i++)
+ lut[i] = i % rss_size;
+}
+
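
i40e_fill_rss_lut() above simply round-robins table entries across the active queues. A tiny standalone example of the resulting default mapping, with the table and queue counts shrunk for readability (real tables are 128 or 512 entries; the DEMO_* names are illustrative):

#include <stdio.h>

#define DEMO_TABLE_SIZE 16	/* stands in for vsi->rss_table_size */
#define DEMO_RSS_SIZE    4	/* stands in for vsi->rss_size */

int main(void)
{
	unsigned char lut[DEMO_TABLE_SIZE];
	int i;

	for (i = 0; i < DEMO_TABLE_SIZE; i++)
		lut[i] = i % DEMO_RSS_SIZE;	/* same fill rule as the patch */

	for (i = 0; i < DEMO_TABLE_SIZE; i++)
		printf("lut[%2d] -> queue %d\n", i, lut[i]);
	return 0;
}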
+/**
+ * i40e_pf_config_rss - Prepare for RSS if used
* @pf: board private structure
**/
-static int i40e_config_rss(struct i40e_pf *pf)
+static int i40e_pf_config_rss(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 seed[I40E_HKEY_ARRAY_SIZE];
+ u8 *lut;
struct i40e_hw *hw = &pf->hw;
u32 reg_val;
u64 hena;
-
- netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ int ret;
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
@@ -7898,8 +8080,6 @@ static int i40e_config_rss(struct i40e_pf *pf)
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
- vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
-
/* Determine the RSS table size based on the hardware capabilities */
reg_val = rd32(hw, I40E_PFQF_CTL_0);
reg_val = (pf->rss_table_size == 512) ?
@@ -7907,10 +8087,32 @@ static int i40e_config_rss(struct i40e_pf *pf)
(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
wr32(hw, I40E_PFQF_CTL_0, reg_val);
- if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
- return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
+ /* Determine the RSS size of the VSI */
+ if (!vsi->rss_size)
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* Use user configured lut if there is one, otherwise use default */
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+
+ /* Use user configured hash key if there is one, otherwise
+ * use default.
+ */
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
else
- return i40e_config_rss_reg(pf, seed);
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ kfree(lut);
+
+ return ret;
}
/**
@@ -7935,13 +8137,28 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
- pf->rss_size = new_rss_size;
+ pf->alloc_rss_size = new_rss_size;
i40e_reset_and_rebuild(pf, true);
- i40e_config_rss(pf);
+
+ /* Discard the user configured hash keys and lut, if less
+ * queues are enabled.
+ */
+ if (queue_count < vsi->rss_size) {
+ i40e_clear_rss_config_user(vsi);
+ dev_dbg(&pf->pdev->dev,
+ "discard user configured hash keys and lut\n");
+ }
+
+ /* Reset vsi->rss_size, as number of enabled queues changed */
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+
+ i40e_pf_config_rss(pf);
}
- dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
- return pf->rss_size;
+ dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
+ pf->alloc_rss_size, pf->rss_size_max);
+ return pf->alloc_rss_size;
}
/**
@@ -8112,13 +8329,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
* maximum might end up larger than the available queues
*/
pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
- pf->rss_size = 1;
+ pf->alloc_rss_size = 1;
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+ pf->alloc_rss_size = min_t(int, pf->rss_size_max,
+ num_online_cpus());
}
/* MFP mode enabled */
@@ -8176,7 +8394,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_HW_ATR_EVICT_CAPABLE |
I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
I40E_FLAG_WB_ON_ITR_CAPABLE |
- I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+ I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+ I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
}
pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
@@ -8275,26 +8494,27 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
/**
- * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
+ * i40e_get_udp_port_idx - Look up a UDP port that may be offloaded for Rx
* @pf: board private structure
* @port: The UDP port to look up
*
* Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
**/
-static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
{
u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->vxlan_ports[i] == port)
+ if (pf->udp_ports[i].index == port)
return i;
}
return i;
}
+#endif
/**
* i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
* @netdev: This physical port's netdev
@@ -8304,6 +8524,7 @@ static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
static void i40e_add_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
+#if IS_ENABLED(CONFIG_VXLAN)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -8313,7 +8534,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
if (sa_family == AF_INET6)
return;
- idx = i40e_get_vxlan_port_idx(pf, port);
+ idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
@@ -8323,7 +8544,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
}
/* Now check if there is space to add the new port */
- next_idx = i40e_get_vxlan_port_idx(pf, 0);
+ next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
@@ -8332,9 +8553,11 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
}
/* New port: add it and mark its index in the bitmap */
- pf->vxlan_ports[next_idx] = port;
- pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
- pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ pf->udp_ports[next_idx].index = port;
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ pf->pending_udp_bitmap |= BIT_ULL(next_idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+#endif
}
/**
@@ -8346,6 +8569,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
static void i40e_del_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
+#if IS_ENABLED(CONFIG_VXLAN)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -8354,23 +8578,108 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
if (sa_family == AF_INET6)
return;
- idx = i40e_get_vxlan_port_idx(pf, port);
+ idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
/* if port exists, set it to 0 (mark for deletion)
* and make it pending
*/
- pf->vxlan_ports[idx] = 0;
- pf->pending_vxlan_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ pf->udp_ports[idx].index = 0;
+ pf->pending_udp_bitmap |= BIT_ULL(idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
} else {
netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port));
}
+#endif
+}
+
+/**
+ * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: New UDP port number that GENEVE started listening to
+ **/
+static void i40e_add_geneve_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+#if IS_ENABLED(CONFIG_GENEVE)
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 next_idx;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_udp_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "udp port %d already offloaded\n",
+ ntohs(port));
+ return;
+ }
+
+ /* Now check if there is space to add the new port */
+ next_idx = i40e_get_udp_port_idx(pf, 0);
+
+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
+ ntohs(port));
+ return;
+ }
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->udp_ports[next_idx].index = port;
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
+ pf->pending_udp_bitmap |= BIT_ULL(next_idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
+#endif
}
+/**
+ * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: UDP port number that GENEVE stopped listening to
+ **/
+static void i40e_del_geneve_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+#if IS_ENABLED(CONFIG_GENEVE)
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_udp_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->udp_ports[idx].index = 0;
+ pf->pending_udp_bitmap |= BIT_ULL(idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
+ ntohs(port));
+ } else {
+ netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
+ ntohs(port));
+ }
#endif
+}
+
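
The new GENEVE callbacks above reuse the generalized pf->udp_ports[] table: the ndo handlers only record the port and tunnel type and set a bit in pending_udp_bitmap, and i40e_sync_udp_filters_subtask() later pushes each pending slot to firmware. A rough user-space model of that bookkeeping (demo_fw_program_port() and all names below are stand-ins, not driver API):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_UDP_PORTS 16	/* mirrors I40E_MAX_PF_UDP_OFFLOAD_PORTS */

struct demo_udp_port {
	uint16_t port;	/* 0 means the slot is free / marked for deletion */
	int type;	/* 0 = VXLAN, 1 = GENEVE (illustrative encoding) */
};

static struct demo_udp_port ports[DEMO_MAX_UDP_PORTS];
static uint64_t pending_bitmap;

static void demo_add_port(uint16_t port, int type)
{
	for (int i = 0; i < DEMO_MAX_UDP_PORTS; i++) {
		if (!ports[i].port) {
			ports[i].port = port;
			ports[i].type = type;
			pending_bitmap |= 1ULL << i;	/* defer to service task */
			return;
		}
	}
}

static void demo_fw_program_port(int idx)	/* stand-in for the AQ call */
{
	printf("slot %d: program port %d, type %d\n",
	       idx, ports[idx].port, ports[idx].type);
}

static void demo_sync_udp_filters(void)
{
	for (int i = 0; i < DEMO_MAX_UDP_PORTS; i++) {
		if (pending_bitmap & (1ULL << i)) {
			pending_bitmap &= ~(1ULL << i);
			demo_fw_program_port(i);
		}
	}
}

int main(void)
{
	demo_add_port(4789, 0);	/* default VXLAN port */
	demo_add_port(6081, 1);	/* default GENEVE port */
	demo_sync_udp_filters();
	return 0;
}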
static int i40e_get_phys_port_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid)
{
@@ -8548,7 +8857,10 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
nlflags, 0, 0, filter_mask, NULL);
}
-#define I40E_MAX_TUNNEL_HDR_LEN 80
+/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
+ * inner mac plus all inner ethertypes.
+ */
+#define I40E_MAX_TUNNEL_HDR_LEN 128
/**
* i40e_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
@@ -8560,9 +8872,9 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb->encapsulation &&
- (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
I40E_MAX_TUNNEL_HDR_LEN))
- return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
return features;
}
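
With the new 128-byte limit above, typical VXLAN/GENEVE encapsulations fit comfortably: the measured span (inner network header minus transport header) covers roughly the outer UDP header, the tunnel header, and the inner MAC header. A back-of-the-envelope check with illustrative header sizes (real offsets come from the skb):

#include <stdio.h>

#define DEMO_MAX_TUNNEL_HDR_LEN 128	/* mirrors I40E_MAX_TUNNEL_HDR_LEN */

int main(void)
{
	int outer_udp = 8;	/* outer UDP header */
	int vxlan_hdr = 8;	/* VXLAN header */
	int inner_mac = 14;	/* inner Ethernet header (18 with a VLAN tag) */
	int span = outer_udp + vxlan_hdr + inner_mac;

	printf("tunnel header span %d bytes -> %s\n", span,
	       span > DEMO_MAX_TUNNEL_HDR_LEN ?
	       "strip checksum/GSO offload features" : "HW offload allowed");
	return 0;
}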
@@ -8595,10 +8907,14 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
-#ifdef CONFIG_I40E_VXLAN
+#if IS_ENABLED(CONFIG_VXLAN)
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
+#if IS_ENABLED(CONFIG_GENEVE)
+ .ndo_add_geneve_port = i40e_add_geneve_port,
+ .ndo_del_geneve_port = i40e_del_geneve_port,
+#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -8632,13 +8948,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np->vsi = vsi;
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_TSO;
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CSUM |
+ NETIF_F_SCTP_CRC |
NETIF_F_HIGHDMA |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
@@ -9051,7 +9368,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
f->is_vf, f->is_netdev);
spin_unlock_bh(&vsi->mac_filter_list_lock);
- i40e_sync_vsi_filters(vsi, false);
+ i40e_sync_vsi_filters(vsi);
i40e_vsi_delete(vsi);
i40e_vsi_free_q_vectors(vsi);
@@ -9213,6 +9530,44 @@ err_vsi:
}
/**
+ * i40e_macaddr_init - explicitly write the mac address filters.
+ *
+ * @vsi: pointer to the vsi.
+ * @macaddr: the MAC address
+ *
+ * This is needed when the macaddr has been obtained by other
+ * means than the default, e.g., from Open Firmware or IDPROM.
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ int ret;
+ struct i40e_aqc_add_macvlan_element_data element;
+
+ ret = i40e_aq_mac_address_write(&vsi->back->hw,
+ I40E_AQC_WRITE_TYPE_LAA_WOL,
+ macaddr, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Addr change for VSI failed: %d\n", ret);
+ return -EADDRNOTAVAIL;
+ }
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add filter failed err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ }
+ return ret;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -9336,6 +9691,17 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
switch (vsi->type) {
/* setup the netdev if needed */
case I40E_VSI_MAIN:
+ /* Apply relevant filters if a platform-specific mac
+ * address was selected.
+ */
+ if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
+ ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
+ if (ret) {
+ dev_warn(&pf->pdev->dev,
+ "could not set up macaddr; err %d\n",
+ ret);
+ }
+ }
case I40E_VSI_VMDQ2:
case I40E_VSI_FCOE:
ret = i40e_config_netdev(vsi);
@@ -9947,7 +10313,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
* the hash
*/
if ((pf->flags & I40E_FLAG_RSS_ENABLED))
- i40e_config_rss(pf);
+ i40e_pf_config_rss(pf);
/* fill in link information and enable LSE reporting */
i40e_update_link_info(&pf->hw);
@@ -9985,7 +10351,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
- pf->rss_size = pf->num_lan_qps = 1;
+ pf->alloc_rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
@@ -10002,7 +10368,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE))) {
/* one qp for PF */
- pf->rss_size = pf->num_lan_qps = 1;
+ pf->alloc_rss_size = pf->num_lan_qps = 1;
queues_left -= pf->num_lan_qps;
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
@@ -10072,8 +10438,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
pf->hw.func_caps.num_tx_qp,
!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
- pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
- pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
+ pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
+ pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
+ queues_left);
#ifdef I40E_FCOE
dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
@@ -10111,55 +10478,86 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
}
#define INFO_STRING_LEN 255
+#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- char *buf, *string;
+ char *buf;
+ int i;
- string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
- if (!string) {
- dev_err(&pf->pdev->dev, "Features string allocation failed\n");
+ buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
+ if (!buf)
return;
- }
- buf = string;
-
- buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
+ i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
- buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
+ i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
- buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
- pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs,
- pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
+ i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
+ pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs,
+ pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
if (pf->flags & I40E_FLAG_RSS_ENABLED)
- buf += sprintf(buf, "RSS ");
+ i += snprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
- buf += sprintf(buf, "FD_ATR ");
+ i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
- buf += sprintf(buf, "FD_SB ");
- buf += sprintf(buf, "NTUPLE ");
+ i += snprintf(&buf[i], REMAIN(i), " FD_SB");
+ i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
- buf += sprintf(buf, "DCB ");
+ i += snprintf(&buf[i], REMAIN(i), " DCB");
#if IS_ENABLED(CONFIG_VXLAN)
- buf += sprintf(buf, "VxLAN ");
+ i += snprintf(&buf[i], REMAIN(i), " VxLAN");
+#endif
+#if IS_ENABLED(CONFIG_GENEVE)
+ i += snprintf(&buf[i], REMAIN(i), " Geneve");
#endif
if (pf->flags & I40E_FLAG_PTP)
- buf += sprintf(buf, "PTP ");
+ i += snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED)
- buf += sprintf(buf, "FCOE ");
+ i += snprintf(&buf[i], REMAIN(i), " FCOE");
#endif
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
- buf += sprintf(buf, "VEB ");
+ i += snprintf(&buf[i], REMAIN(i), " VEB");
else
- buf += sprintf(buf, "VEPA ");
+ i += snprintf(&buf[i], REMAIN(i), " VEPA");
- BUG_ON(buf > (string + INFO_STRING_LEN));
- dev_info(&pf->pdev->dev, "%s\n", string);
- kfree(string);
+ dev_info(&pf->pdev->dev, "%s\n", buf);
+ kfree(buf);
+ WARN_ON(i > INFO_STRING_LEN);
+}
+
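
The rewrite above drops the unbounded sprintf() pointer arithmetic in favor of snprintf() with a running offset and the REMAIN() helper. A minimal sketch of the same bounded-append pattern (buffer size and feature names here are arbitrary, and DEMO_REMAIN() mirrors the macro added in the patch):

#include <stdio.h>

#define DEMO_INFO_LEN 64
#define DEMO_REMAIN(x) (DEMO_INFO_LEN - (x))

int main(void)
{
	char buf[DEMO_INFO_LEN];
	int i;

	i = snprintf(buf, DEMO_INFO_LEN, "Features: PF-id[%d]", 0);
	i += snprintf(&buf[i], DEMO_REMAIN(i), " RSS");
	i += snprintf(&buf[i], DEMO_REMAIN(i), " FD_ATR");
	i += snprintf(&buf[i], DEMO_REMAIN(i), " VEPA");

	printf("%s (used %d of %d bytes)\n", buf, i, DEMO_INFO_LEN);
	return 0;
}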
+/**
+ * i40e_get_platform_mac_addr - get platform-specific MAC address
+ *
+ * @pdev: PCI device information struct
+ * @pf: board private structure
+ *
+ * Look up the MAC address in Open Firmware on systems that support it,
+ * and use IDPROM on SPARC if no OF address is found. On return, the
+ * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
+ * has been selected.
+ **/
+static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
+{
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+ const unsigned char *addr;
+ u8 *mac_addr = pf->hw.mac.addr;
+
+ pf->flags &= ~I40E_FLAG_PF_MAC;
+ addr = of_get_mac_address(dp);
+ if (addr) {
+ ether_addr_copy(mac_addr, addr);
+ pf->flags |= I40E_FLAG_PF_MAC;
+#ifdef CONFIG_SPARC
+ } else {
+ ether_addr_copy(mac_addr, idprom->id_ethaddr);
+ pf->flags |= I40E_FLAG_PF_MAC;
+#endif /* CONFIG_SPARC */
+ }
}
/**
@@ -10183,6 +10581,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 link_status;
int err;
u32 len;
+ u32 val;
u32 i;
u8 set_fc_aq_fail;
@@ -10302,6 +10701,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.arq_mutex);
err = i40e_init_adminq(hw);
+ if (err) {
+ if (err == I40E_ERR_FIRMWARE_API_VERSION)
+ dev_info(&pdev->dev,
+ "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+ else
+ dev_info(&pdev->dev,
+ "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
+
+ goto err_pf_reset;
+ }
/* provide nvm, fw, api versions */
dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
@@ -10309,12 +10718,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->aq.api_maj_ver, hw->aq.api_min_ver,
i40e_nvm_version_str(hw));
- if (err) {
- dev_info(&pdev->dev,
- "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
- goto err_pf_reset;
- }
-
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
dev_info(&pdev->dev,
@@ -10367,6 +10770,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_mac_addr(hw, hw->mac.addr);
+ /* allow a platform config to override the HW addr */
+ i40e_get_platform_mac_addr(pdev, pf);
if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
@@ -10411,7 +10816,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
+ if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
pf->wol_en = false;
else
pf->wol_en = true;
@@ -10493,6 +10898,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);