Diffstat (limited to 'drivers/net/ethernet/intel/iavf')

 drivers/net/ethernet/intel/iavf/Makefile        |    3
 drivers/net/ethernet/intel/iavf/iavf.h          |  250
 drivers/net/ethernet/intel/iavf/iavf_adminq.c   |   19
 drivers/net/ethernet/intel/iavf/iavf_adminq.h   |    4
 drivers/net/ethernet/intel/iavf/iavf_adv_rss.c  |  218
 drivers/net/ethernet/intel/iavf/iavf_adv_rss.h  |   95
 drivers/net/ethernet/intel/iavf/iavf_common.c   |  128
 drivers/net/ethernet/intel/iavf/iavf_ethtool.c  | 1131
 drivers/net/ethernet/intel/iavf/iavf_fdir.c     |  779
 drivers/net/ethernet/intel/iavf/iavf_fdir.h     |  118
 drivers/net/ethernet/intel/iavf/iavf_main.c     | 2218
 drivers/net/ethernet/intel/iavf/iavf_status.h   |    2
 drivers/net/ethernet/intel/iavf/iavf_trace.h    |    2
 drivers/net/ethernet/intel/iavf/iavf_txrx.c     |  208
 drivers/net/ethernet/intel/iavf/iavf_txrx.h     |   32
 drivers/net/ethernet/intel/iavf/iavf_type.h     |    9
 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 1422

 17 files changed, 5569 insertions, 1069 deletions
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index c997063ed728..9c3e45c54d01 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,5 +11,6 @@ subdir-ccflags-y += -I$(src)
 obj-$(CONFIG_IAVF) += iavf.o
 
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
+iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
+	     iavf_adv_rss.o \
 	     iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index bd1b1ed323f4..3f6187c16424 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -37,10 +37,16 @@
 #include "iavf_type.h"
 #include <linux/avf/virtchnl.h>
 #include "iavf_txrx.h"
+#include "iavf_fdir.h"
+#include "iavf_adv_rss.h"
+#include <linux/bitmap.h>
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 #define PFX "iavf: "
 
+int iavf_status_to_errno(enum iavf_status status);
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status);
+
 /* VSI state flags shared with common code */
 enum iavf_vsi_state_t {
 	__IAVF_VSI_DOWN,
@@ -52,12 +58,12 @@ enum iavf_vsi_state_t {
 struct iavf_vsi {
 	struct iavf_adapter *back;
 	struct net_device *netdev;
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
+	unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
 	u16 seid;
 	u16 id;
 	DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
 	int base_vector;
-	u16 work_limit;
 	u16 qs_handle;
 	void *priv;     /* client driver data reference. */
 };
@@ -81,11 +87,16 @@ struct iavf_vsi {
 #define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
 #define IAVF_TX_CTXTDESC(R, i) \
	(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
-#define IAVF_MAX_REQ_QUEUES 4
+#define IAVF_MAX_REQ_QUEUES 16
 
 #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
 #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
 #define IAVF_MBPS_DIVISOR	125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA	50
+
+#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
+					(IAVF_MAX_VF_VSI * \
+					 sizeof(struct virtchnl_vsi_resource)))
 
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
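A note on the new IAVF_VIRTCHNL_VF_RESOURCE_SIZE bound: it sizes the VIRTCHNL_OP_GET_VF_RESOURCES reply buffer for the worst case, because struct virtchnl_vf_resource ends in a trailing array of VSI resources. A minimal standalone sketch of the same sizing pattern (stand-in struct and constant names, not the kernel definitions):

    #include <stdio.h>
    #include <stdlib.h>

    struct vsi_res { int id; };                              /* stand-in */
    struct vf_res  { int num_vsis; struct vsi_res vsi[]; };  /* flexible array member */

    #define MAX_VF_VSI 3   /* stand-in for IAVF_MAX_VF_VSI */
    #define VF_RES_SIZE (sizeof(struct vf_res) + MAX_VF_VSI * sizeof(struct vsi_res))

    int main(void)
    {
        /* one worst-case allocation, like the driver's config reply buffer */
        struct vf_res *res = malloc(VF_RES_SIZE);

        printf("worst-case buffer: %zu bytes\n", VF_RES_SIZE);
        free(res);
        return 0;
    }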
@@ -130,15 +141,31 @@ struct iavf_q_vector {
 struct iavf_mac_filter {
 	struct list_head list;
 	u8 macaddr[ETH_ALEN];
-	bool remove;		/* filter needs to be removed */
-	bool add;		/* filter needs to be added */
+	struct {
+		u8 is_new_mac:1;	/* filter is new, wait for PF decision */
+		u8 remove:1;		/* filter needs to be removed */
+		u8 add:1;		/* filter needs to be added */
+		u8 is_primary:1;	/* filter is a default VF MAC */
+		u8 add_handled:1;	/* received response for filter add */
+		u8 padding:3;
+	};
+};
+
+#define IAVF_VLAN(vid, tpid) ((struct iavf_vlan){ vid, tpid })
+struct iavf_vlan {
+	u16 vid;
+	u16 tpid;
 };
 
 struct iavf_vlan_filter {
 	struct list_head list;
-	u16 vlan;
-	bool remove;		/* filter needs to be removed */
-	bool add;		/* filter needs to be added */
+	struct iavf_vlan vlan;
+	struct {
+		u8 is_new_vlan:1;	/* filter is new, wait for PF answer */
+		u8 remove:1;		/* filter needs to be removed */
+		u8 add:1;		/* filter needs to be added */
+		u8 padding:5;
+	};
 };
 
 #define IAVF_MAX_TRAFFIC_CLASS	4
@@ -169,7 +196,10 @@ enum iavf_state_t {
 	__IAVF_REMOVE,		/* driver is being unloaded */
 	__IAVF_INIT_VERSION_CHECK,	/* aq msg sent, awaiting reply */
 	__IAVF_INIT_GET_RESOURCES,	/* aq msg sent, awaiting reply */
+	__IAVF_INIT_EXTENDED_CAPS,	/* process extended caps which require aq msg exchange */
+	__IAVF_INIT_CONFIG_ADAPTER,
 	__IAVF_INIT_SW,		/* got resources, setting up structs */
+	__IAVF_INIT_FAILED,	/* init failed, restarting procedure */
 	__IAVF_RESETTING,	/* in reset */
 	__IAVF_COMM_FAILED,	/* communication with PF failed */
 	/* Below here, watchdog is running */
@@ -180,8 +210,6 @@ enum iavf_state_t {
 };
 
 enum iavf_critical_section_t {
-	__IAVF_IN_CRITICAL_TASK,	/* cannot be interrupted */
-	__IAVF_IN_CLIENT_TASK,
 	__IAVF_IN_REMOVE_TASK,	/* device being removed */
 };
 
@@ -215,16 +243,22 @@ struct iavf_cloud_filter {
 	bool add;		/* filter needs to be added */
 };
 
+#define IAVF_RESET_WAIT_MS 10
+#define IAVF_RESET_WAIT_DETECTED_COUNT 500
+#define IAVF_RESET_WAIT_COMPLETE_COUNT 2000
+
 /* board specific private data structure */
 struct iavf_adapter {
 	struct work_struct reset_task;
 	struct work_struct adminq_task;
 	struct delayed_work client_task;
-	struct delayed_work init_task;
 	wait_queue_head_t down_waitqueue;
+	wait_queue_head_t vc_waitqueue;
 	struct iavf_q_vector *q_vectors;
 	struct list_head vlan_filter_list;
 	struct list_head mac_filter_list;
+	struct mutex crit_lock;
+	struct mutex client_lock;
 	/* Lock to protect accesses to MAC and VLAN lists */
 	spinlock_t mac_vlan_list_lock;
 	char misc_vector_name[IFNAMSIZ + 9];
@@ -262,36 +296,68 @@ struct iavf_adapter {
 #define IAVF_FLAG_LEGACY_RX			BIT(15)
 #define IAVF_FLAG_REINIT_ITR_NEEDED		BIT(16)
 #define IAVF_FLAG_QUEUES_DISABLED		BIT(17)
+#define IAVF_FLAG_SETUP_NETDEV_FEATURES		BIT(18)
+#define IAVF_FLAG_REINIT_MSIX_NEEDED		BIT(20)
+#define IAVF_FLAG_INITIAL_MAC_SET		BIT(23)
 /* duplicates for common code */
 #define IAVF_FLAG_DCB_ENABLED			0
 	/* flags for admin queue service task */
-	u32 aq_required;
-#define IAVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
-#define IAVF_FLAG_AQ_DISABLE_QUEUES		BIT(1)
-#define IAVF_FLAG_AQ_ADD_MAC_FILTER		BIT(2)
-#define IAVF_FLAG_AQ_ADD_VLAN_FILTER		BIT(3)
-#define IAVF_FLAG_AQ_DEL_MAC_FILTER		BIT(4)
-#define IAVF_FLAG_AQ_DEL_VLAN_FILTER		BIT(5)
-#define IAVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
-#define IAVF_FLAG_AQ_MAP_VECTORS		BIT(7)
-#define IAVF_FLAG_AQ_HANDLE_RESET		BIT(8)
-#define IAVF_FLAG_AQ_CONFIGURE_RSS		BIT(9) /* direct AQ config */
-#define IAVF_FLAG_AQ_GET_CONFIG		BIT(10)
+	u64 aq_required;
+#define IAVF_FLAG_AQ_ENABLE_QUEUES		BIT_ULL(0)
+#define IAVF_FLAG_AQ_DISABLE_QUEUES		BIT_ULL(1)
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER		BIT_ULL(2)
+#define IAVF_FLAG_AQ_ADD_VLAN_FILTER		BIT_ULL(3)
+#define IAVF_FLAG_AQ_DEL_MAC_FILTER		BIT_ULL(4)
+#define IAVF_FLAG_AQ_DEL_VLAN_FILTER		BIT_ULL(5)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES		BIT_ULL(6)
+#define IAVF_FLAG_AQ_MAP_VECTORS		BIT_ULL(7)
+#define IAVF_FLAG_AQ_HANDLE_RESET		BIT_ULL(8)
+#define IAVF_FLAG_AQ_CONFIGURE_RSS		BIT_ULL(9) /* direct AQ config */
+#define IAVF_FLAG_AQ_GET_CONFIG		BIT_ULL(10)
 /* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA			BIT(11)
-#define IAVF_FLAG_AQ_SET_HENA			BIT(12)
-#define IAVF_FLAG_AQ_SET_RSS_KEY		BIT(13)
-#define IAVF_FLAG_AQ_SET_RSS_LUT		BIT(14)
-#define IAVF_FLAG_AQ_REQUEST_PROMISC		BIT(15)
-#define IAVF_FLAG_AQ_RELEASE_PROMISC		BIT(16)
-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI		BIT(17)
-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI		BIT(18)
-#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT(19)
-#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT(20)
-#define IAVF_FLAG_AQ_ENABLE_CHANNELS		BIT(21)
-#define IAVF_FLAG_AQ_DISABLE_CHANNELS		BIT(22)
-#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT(23)
-#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT(24)
+#define IAVF_FLAG_AQ_GET_HENA			BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_HENA			BIT_ULL(12)
+#define IAVF_FLAG_AQ_SET_RSS_KEY		BIT_ULL(13)
+#define IAVF_FLAG_AQ_SET_RSS_LUT		BIT_ULL(14)
+#define IAVF_FLAG_AQ_REQUEST_PROMISC		BIT_ULL(15)
+#define IAVF_FLAG_AQ_RELEASE_PROMISC		BIT_ULL(16)
+#define IAVF_FLAG_AQ_REQUEST_ALLMULTI		BIT_ULL(17)
+#define IAVF_FLAG_AQ_RELEASE_ALLMULTI		BIT_ULL(18)
+#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT_ULL(19)
+#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT_ULL(20)
+#define IAVF_FLAG_AQ_ENABLE_CHANNELS		BIT_ULL(21)
+#define IAVF_FLAG_AQ_DISABLE_CHANNELS		BIT_ULL(22)
+#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT_ULL(23)
+#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT_ULL(24)
+#define IAVF_FLAG_AQ_ADD_FDIR_FILTER		BIT_ULL(25)
+#define IAVF_FLAG_AQ_DEL_FDIR_FILTER		BIT_ULL(26)
+#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG		BIT_ULL(27)
+#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG		BIT_ULL(28)
+#define IAVF_FLAG_AQ_REQUEST_STATS		BIT_ULL(29)
+#define IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS	BIT_ULL(30)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING		BIT_ULL(31)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING	BIT_ULL(32)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING		BIT_ULL(33)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING	BIT_ULL(34)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION		BIT_ULL(35)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION	BIT_ULL(36)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION		BIT_ULL(37)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION	BIT_ULL(38)
+
+	/* flags for processing extended capability messages during
+	 * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
+	 * both a SEND and a RECV step, which must be processed in sequence.
+	 *
+	 * During the __IAVF_INIT_EXTENDED_CAPS state, the driver will
+	 * process one flag at a time during each state loop.
+	 */
+	u64 extended_caps;
+#define IAVF_EXTENDED_CAP_SEND_VLAN_V2		BIT_ULL(0)
+#define IAVF_EXTENDED_CAP_RECV_VLAN_V2		BIT_ULL(1)
+
+#define IAVF_EXTENDED_CAPS				\
+	(IAVF_EXTENDED_CAP_SEND_VLAN_V2 |		\
+	 IAVF_EXTENDED_CAP_RECV_VLAN_V2)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -300,12 +366,21 @@ struct iavf_adapter {
 	struct iavf_hw hw; /* defined in iavf_type.h */
 
 	enum iavf_state_t state;
+	enum iavf_state_t last_state;
 	unsigned long crit_section;
 
 	struct delayed_work watchdog_task;
 	bool netdev_registered;
 	bool link_up;
 	enum virtchnl_link_speed link_speed;
+	/* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set
+	 * in vf_res->vf_cap_flags. Use ADV_LINK_SUPPORT macro to determine if
+	 * this field is valid. This field should be used going forward and the
+	 * enum virtchnl_link_speed above should be considered the legacy way of
+	 * storing/communicating link speeds.
+	 */
+	u32 link_speed_mbps;
+
 	enum virtchnl_ops current_op;
 #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
			    (_a)->vf_res->vf_cap_flags & \
@@ -322,11 +397,26 @@ struct iavf_adapter {
			  VIRTCHNL_VF_OFFLOAD_RSS_PF)))
 #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
			  VIRTCHNL_VF_OFFLOAD_VLAN)
+#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+			     VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+#define VLAN_V2_FILTERING_ALLOWED(_a) \
+	(VLAN_V2_ALLOWED((_a)) && \
+	 ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
+	  (_a)->vlan_v2_caps.filtering.filtering_support.inner))
+#define VLAN_FILTERING_ALLOWED(_a) \
+	(VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a)))
+#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+			      VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+			       VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+#define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+			     VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
 	struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
 	struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
 	struct virtchnl_version_info pf_version;
 #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
		       ((_a)->pf_version.minor == 1))
+	struct virtchnl_vlan_caps vlan_v2_caps;
 	u16 msg_enable;
 	struct iavf_eth_stats current_stats;
 	struct iavf_vsi vsi;
@@ -344,6 +434,19 @@ struct iavf_adapter {
 	/* lock to protect access to the cloud filter list */
 	spinlock_t cloud_filter_list_lock;
 	u16 num_cloud_filters;
+	/* snapshot of "num_active_queues" before setup_tc for qdisc add
+	 * is invoked. This information is useful during qdisc del flow,
+	 * to restore correct number of queues
+	 */
+	int orig_num_active_queues;
+
+#define IAVF_MAX_FDIR_FILTERS 128	/* max allowed Flow Director filters */
+	u16 fdir_active_fltr;
+	struct list_head fdir_list_head;
+	spinlock_t fdir_fltr_lock;	/* protect the Flow Director filter list */
+
+	struct list_head adv_rss_list_head;
+	spinlock_t adv_rss_lock;	/* protect the RSS management list */
 };
 
@@ -357,13 +460,63 @@ struct iavf_device {
 
 /* needed by iavf_ethtool.c */
 extern char iavf_driver_name[];
-extern const char iavf_driver_version[];
 extern struct workqueue_struct *iavf_wq;
 
+static inline const char *iavf_state_str(enum iavf_state_t state)
+{
+	switch (state) {
+	case __IAVF_STARTUP:
+		return "__IAVF_STARTUP";
+	case __IAVF_REMOVE:
+		return "__IAVF_REMOVE";
+	case __IAVF_INIT_VERSION_CHECK:
+		return "__IAVF_INIT_VERSION_CHECK";
+	case __IAVF_INIT_GET_RESOURCES:
+		return "__IAVF_INIT_GET_RESOURCES";
+	case __IAVF_INIT_EXTENDED_CAPS:
+		return "__IAVF_INIT_EXTENDED_CAPS";
+	case __IAVF_INIT_CONFIG_ADAPTER:
+		return "__IAVF_INIT_CONFIG_ADAPTER";
+	case __IAVF_INIT_SW:
+		return "__IAVF_INIT_SW";
+	case __IAVF_INIT_FAILED:
+		return "__IAVF_INIT_FAILED";
+	case __IAVF_RESETTING:
+		return "__IAVF_RESETTING";
+	case __IAVF_COMM_FAILED:
+		return "__IAVF_COMM_FAILED";
+	case __IAVF_DOWN:
+		return "__IAVF_DOWN";
+	case __IAVF_DOWN_PENDING:
+		return "__IAVF_DOWN_PENDING";
+	case __IAVF_TESTING:
+		return "__IAVF_TESTING";
+	case __IAVF_RUNNING:
+		return "__IAVF_RUNNING";
+	default:
+		return "__IAVF_UNKNOWN_STATE";
+	}
+}
+
+static inline void iavf_change_state(struct iavf_adapter *adapter,
+				     enum iavf_state_t state)
+{
+	if (adapter->state != state) {
+		adapter->last_state = adapter->state;
+		adapter->state = state;
+	}
+	dev_dbg(&adapter->pdev->dev,
+		"state transition from:%s to:%s\n",
+		iavf_state_str(adapter->last_state),
+		iavf_state_str(adapter->state));
+}
+
 int iavf_up(struct iavf_adapter *adapter);
 void iavf_down(struct iavf_adapter *adapter);
 int iavf_process_config(struct iavf_adapter *adapter);
+int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
 void iavf_schedule_reset(struct iavf_adapter *adapter);
+void iavf_schedule_request_stats(struct iavf_adapter *adapter);
 void iavf_reset(struct iavf_adapter *adapter);
 void iavf_set_ethtool_ops(struct net_device *netdev);
 void iavf_update_stats(struct iavf_adapter *adapter);
@@ -380,6 +533,10 @@ int iavf_send_api_ver(struct iavf_adapter *adapter);
 int iavf_verify_api_ver(struct iavf_adapter *adapter);
 int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
 int iavf_get_vf_config(struct iavf_adapter *adapter);
+int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
+int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
 void iavf_configure_queues(struct iavf_adapter *adapter);
 void iavf_deconfigure_queues(struct iavf_adapter *adapter);
@@ -393,7 +550,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter);
 void iavf_del_vlans(struct iavf_adapter *adapter);
 void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
 void iavf_request_stats(struct iavf_adapter *adapter);
-void iavf_request_reset(struct iavf_adapter *adapter);
+int iavf_request_reset(struct iavf_adapter *adapter);
 void iavf_get_hena(struct iavf_adapter *adapter);
 void iavf_set_hena(struct iavf_adapter *adapter);
 void iavf_set_rss_key(struct iavf_adapter *adapter);
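How the SEND/RECV pairs above are meant to be consumed, one step per pass through the state machine, can be sketched as follows (a simplified illustration of the scheme the extended_caps comment describes, not the driver's actual __IAVF_INIT_EXTENDED_CAPS handler; it reuses iavf_send_vf_offload_vlan_v2_msg(), iavf_get_vf_vlan_v2_caps() and iavf_change_state() declared in this header):

    /* Process at most one extended-caps step per state-machine pass:
     * the SEND step issues the virtchnl request, the matching RECV
     * step waits for and parses the reply.
     */
    static void init_process_extended_caps(struct iavf_adapter *adapter)
    {
        if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
            if (!iavf_send_vf_offload_vlan_v2_msg(adapter))
                adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
            return;    /* one flag per loop, as the comment requires */
        }

        if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
            if (!iavf_get_vf_vlan_v2_caps(adapter))
                adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
            return;
        }

        /* all capability exchanges finished */
        iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
    }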
@@ -415,6 +572,21 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
 void iavf_disable_channels(struct iavf_adapter *adapter);
 void iavf_add_cloud_filter(struct iavf_adapter *adapter);
 void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+			     const u8 *new_mac);
+void
+iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+			       netdev_features_t prev_features,
+			       netdev_features_t features);
+void iavf_add_fdir_filter(struct iavf_adapter *adapter);
+void iavf_del_fdir_filter(struct iavf_adapter *adapter);
+void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
+void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr);
+int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
 #endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9fa3fa99b4c2..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
 static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
 {
 	enum iavf_status ret_code = 0;
+	int i;
 
 	if (hw->aq.asq.count > 0) {
 		/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
 	/* initialize base registers */
 	ret_code = iavf_config_asq_regs(hw);
 	if (ret_code)
-		goto init_adminq_free_rings;
+		goto init_free_asq_bufs;
 
 	/* success! */
 	hw->aq.asq.count = hw->aq.num_asq_entries;
 	goto init_adminq_exit;
 
+init_free_asq_bufs:
+	for (i = 0; i < hw->aq.num_asq_entries; i++)
+		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
 init_adminq_free_rings:
 	iavf_free_adminq_asq(hw);
 
@@ -383,6 +389,7 @@ init_adminq_exit:
 static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
 {
 	enum iavf_status ret_code = 0;
+	int i;
 
 	if (hw->aq.arq.count > 0) {
 		/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
 	/* initialize base registers */
 	ret_code = iavf_config_arq_regs(hw);
 	if (ret_code)
-		goto init_adminq_free_rings;
+		goto init_free_arq_bufs;
 
 	/* success! */
 	hw->aq.arq.count = hw->aq.num_arq_entries;
 	goto init_adminq_exit;
 
+init_free_arq_bufs:
+	for (i = 0; i < hw->aq.num_arq_entries; i++)
+		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
 init_adminq_free_rings:
 	iavf_free_adminq_arq(hw);
 
@@ -551,15 +562,13 @@ init_adminq_exit:
 **/
 enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
 {
-	enum iavf_status ret_code = 0;
-
 	if (iavf_check_asq_alive(hw))
 		iavf_aq_queue_shutdown(hw, true);
 
 	iavf_shutdown_asq(hw);
 	iavf_shutdown_arq(hw);
 
-	return ret_code;
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index baf2fe26f302..1f60518eb0e5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -85,8 +85,8 @@ struct iavf_adminq_info {
 
 /**
 * iavf_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
 **/
 static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
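The iavf_init_asq()/iavf_init_arq() changes matter because the old error path jumped straight to init_adminq_free_rings and leaked the per-entry DMA buffers allocated just before register configuration. The ordering the fix restores is the usual C goto-cleanup ladder, where resources acquired later are released first; a self-contained illustration of the pattern (plain userspace C, not the kernel helpers):

    #include <stdlib.h>

    /* Minimal goto-unwind ladder mirroring the adminq fix: a failure
     * after the buffer loop must land in err_free_bufs, not skip it. */
    static int init_queue(void)
    {
        void *ring, **bufs;
        int i, n = 16;

        ring = malloc(4096);
        if (!ring)
            goto err_out;

        bufs = calloc(n, sizeof(*bufs));
        if (!bufs)
            goto err_free_ring;

        for (i = 0; i < n; i++) {
            bufs[i] = malloc(512);
            if (!bufs[i])
                goto err_free_bufs;
        }

        /* ...configure registers; on failure, also goto err_free_bufs... */
        return 0;

    err_free_bufs:
        while (i--)          /* free only what was allocated */
            free(bufs[i]);
        free(bufs);
    err_free_ring:
        free(ring);
    err_out:
        return -1;
    }

    int main(void) { return init_queue() ? 1 : 0; }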
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
new file mode 100644
index 000000000000..6edbf134b73f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Intel Corporation. */
+
+/* advanced RSS configuration ethtool support for iavf */
+
+#include "iavf.h"
+
+/**
+ * iavf_fill_adv_rss_ip4_hdr - fill the IPv4 RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_ip4_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_SA)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_DA)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+}
+
+/**
+ * iavf_fill_adv_rss_ip6_hdr - fill the IPv6 RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_ip6_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_SA)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_DA)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+}
+
+/**
+ * iavf_fill_adv_rss_tcp_hdr - fill the TCP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_tcp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_udp_hdr - fill the UDP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_udp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_sctp_hdr - fill the SCTP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+
+	if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message
+ * @rss_cfg: the virtchnl message to be filled with RSS configuration setting
+ * @packet_hdrs: the RSS configuration protocol header types
+ * @hash_flds: the RSS configuration protocol hash fields
+ *
+ * Returns 0 if the RSS configuration virtchnl message is filled successfully
+ */
+int
+iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
+			  u32 packet_hdrs, u64 hash_flds)
+{
+	struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
+	struct virtchnl_proto_hdr *hdr;
+
+	rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+
+	proto_hdrs->tunnel_level = 0;	/* always outer layer */
+
+	hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+	switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) {
+	case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
+		iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
+		break;
+	case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
+		iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+	switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) {
+	case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
+		iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
+		break;
+	case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
+		iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
+		break;
+	case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
+		iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
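iavf_fill_adv_rss_cfg_msg() expects exactly one L3 bit and one L4 bit set in packet_hdrs; anything else falls into the -EINVAL defaults. A hypothetical call for hashing TCP-over-IPv4 flows on both ports (a usage fragment, not driver code):

    struct virtchnl_rss_cfg cfg = {};
    int err;

    err = iavf_fill_adv_rss_cfg_msg(&cfg,
                                    IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 |
                                    IAVF_ADV_RSS_FLOW_SEG_HDR_TCP,
                                    IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
                                    IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT);
    /* err is -EINVAL if the L3 or L4 selection is missing or ambiguous */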
+
+/**
+ * iavf_find_adv_rss_cfg_by_hdrs - find RSS configuration with header type
+ * @adapter: pointer to the VF adapter structure
+ * @packet_hdrs: protocol header type to find
+ *
+ * Returns pointer to the advanced RSS configuration if found, or NULL
+ */
+struct iavf_adv_rss *
+iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs)
+{
+	struct iavf_adv_rss *rss;
+
+	list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
+		if (rss->packet_hdrs == packet_hdrs)
+			return rss;
+
+	return NULL;
+}
+
+/**
+ * iavf_print_adv_rss_cfg
+ * @adapter: pointer to the VF adapter structure
+ * @rss: pointer to the advanced RSS configuration to print
+ * @action: the string description about how to handle the RSS
+ * @result: the string description about the virtchnl result
+ *
+ * Print the advanced RSS configuration
+ **/
+void
+iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
+		       const char *action, const char *result)
+{
+	u32 packet_hdrs = rss->packet_hdrs;
+	u64 hash_flds = rss->hash_flds;
+	static char hash_opt[300];
+	const char *proto;
+
+	if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_TCP)
+		proto = "TCP";
+	else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_UDP)
+		proto = "UDP";
+	else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+		proto = "SCTP";
+	else
+		return;
+
+	memset(hash_opt, 0, sizeof(hash_opt));
+
+	strcat(hash_opt, proto);
+	if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4)
+		strcat(hash_opt, "v4 ");
+	else
+		strcat(hash_opt, "v6 ");
+
+	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
+			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
+		strcat(hash_opt, "IP SA,");
+	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
+			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
+		strcat(hash_opt, "IP DA,");
+	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
+			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
+			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
+		strcat(hash_opt, "src port,");
+	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
+			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
+			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
+		strcat(hash_opt, "dst port,");
+
+	if (!action)
+		action = "";
+
+	if (!result)
+		result = "";
+
+	dev_info(&adapter->pdev->dev, "%s %s %s\n", action, hash_opt, result);
+}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
new file mode 100644
index 000000000000..4d3be11af7aa
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _IAVF_ADV_RSS_H_
+#define _IAVF_ADV_RSS_H_
+
+struct iavf_adapter;
+
+/* State of advanced RSS configuration */
+enum iavf_adv_rss_state_t {
+	IAVF_ADV_RSS_ADD_REQUEST,	/* User requests to add RSS */
+	IAVF_ADV_RSS_ADD_PENDING,	/* RSS pending add by the PF */
+	IAVF_ADV_RSS_DEL_REQUEST,	/* Driver requests to delete RSS */
+	IAVF_ADV_RSS_DEL_PENDING,	/* RSS pending delete by the PF */
+	IAVF_ADV_RSS_ACTIVE,		/* RSS configuration is active */
+};
+
+enum iavf_adv_rss_flow_seg_hdr {
+	IAVF_ADV_RSS_FLOW_SEG_HDR_NONE	= 0x00000000,
+	IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4	= 0x00000001,
+	IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6	= 0x00000002,
+	IAVF_ADV_RSS_FLOW_SEG_HDR_TCP	= 0x00000004,
+	IAVF_ADV_RSS_FLOW_SEG_HDR_UDP	= 0x00000008,
+	IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP	= 0x00000010,
+};
+
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_L3		\
+	(IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4	|	\
+	 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6)
+
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_L4		\
+	(IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |	\
+	 IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |	\
+	 IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+
+enum iavf_adv_rss_flow_field {
+	/* L3 */
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA,
+	/* L4 */
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT,
+
+	/* The total number of enums must not exceed 64 */
+	IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX
+};
+
+#define IAVF_ADV_RSS_HASH_INVALID	0
+#define IAVF_ADV_RSS_HASH_FLD_IPV4_SA	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV6_SA	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV4_DA	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV6_DA	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA)
+#define IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT	\
+	BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
+/* bookkeeping of advanced RSS configuration */
+struct iavf_adv_rss {
+	enum iavf_adv_rss_state_t state;
+	struct list_head list;
+
+	u32 packet_hdrs;
+	u64 hash_flds;
+
+	struct virtchnl_rss_cfg cfg_msg;
+};
+
+int
+iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
+			  u32 packet_hdrs, u64 hash_flds);
+struct iavf_adv_rss *
+iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
+void
+iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
+		       const char *action, const char *result);
+#endif /* _IAVF_ADV_RSS_H_ */
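The packet_hdrs/hash_flds pair above is what the ethtool rx-flow-hash path ultimately has to produce from the RXH_* request bits. A sketch of that translation for the TCP/IPv4 case (hypothetical helper, not the function the driver uses):

    #include <linux/ethtool.h>   /* RXH_IP_SRC, RXH_IP_DST, RXH_L4_B_* */

    static u64 hash_flds_from_rxh(u64 rxh)   /* hypothetical helper */
    {
        u64 flds = IAVF_ADV_RSS_HASH_INVALID;

        if (rxh & RXH_IP_SRC)
            flds |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
        if (rxh & RXH_IP_DST)
            flds |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
        if (rxh & RXH_L4_B_0_1)          /* L4 bytes 0-1: source port */
            flds |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
        if (rxh & RXH_L4_B_2_3)          /* L4 bytes 2-3: destination port */
            flds |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
        return flds;
    }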
"IAVF_ERR_INVALID_MAC_ADDR"; case IAVF_ERR_DEVICE_NOT_SUPPORTED: return "IAVF_ERR_DEVICE_NOT_SUPPORTED"; - case IAVF_ERR_MASTER_REQUESTS_PENDING: - return "IAVF_ERR_MASTER_REQUESTS_PENDING"; + case IAVF_ERR_PRIMARY_REQUESTS_PENDING: + return "IAVF_ERR_PRIMARY_REQUESTS_PENDING"; case IAVF_ERR_INVALID_LINK_SETTINGS: return "IAVF_ERR_INVALID_LINK_SETTINGS"; case IAVF_ERR_AUTONEG_NOT_COMPLETE: @@ -522,9 +522,9 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, * ENDIF */ -/* macro to make the table lines short */ +/* macro to make the table lines short, use explicit indexing with [PTYPE] */ #define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - { PTYPE, \ + [PTYPE] = { \ 1, \ IAVF_RX_PTYPE_OUTER_##OUTER_IP, \ IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \ @@ -535,16 +535,15 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, IAVF_RX_PTYPE_INNER_PROT_##I, \ IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL } -#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \ - { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } +#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG #define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG #define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC -/* Lookup table mapping the HW PTYPE to the bit field for decoding */ -struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = { +/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */ +struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = { /* L2 Packet types */ IAVF_PTT_UNUSED_ENTRY(0), IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), @@ -750,118 +749,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = { IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ - IAVF_PTT_UNUSED_ENTRY(154), - IAVF_PTT_UNUSED_ENTRY(155), - IAVF_PTT_UNUSED_ENTRY(156), - IAVF_PTT_UNUSED_ENTRY(157), - IAVF_PTT_UNUSED_ENTRY(158), - IAVF_PTT_UNUSED_ENTRY(159), - - IAVF_PTT_UNUSED_ENTRY(160), - IAVF_PTT_UNUSED_ENTRY(161), - IAVF_PTT_UNUSED_ENTRY(162), - IAVF_PTT_UNUSED_ENTRY(163), - IAVF_PTT_UNUSED_ENTRY(164), - IAVF_PTT_UNUSED_ENTRY(165), - IAVF_PTT_UNUSED_ENTRY(166), - IAVF_PTT_UNUSED_ENTRY(167), - IAVF_PTT_UNUSED_ENTRY(168), - IAVF_PTT_UNUSED_ENTRY(169), - - IAVF_PTT_UNUSED_ENTRY(170), - IAVF_PTT_UNUSED_ENTRY(171), - IAVF_PTT_UNUSED_ENTRY(172), - IAVF_PTT_UNUSED_ENTRY(173), - IAVF_PTT_UNUSED_ENTRY(174), - IAVF_PTT_UNUSED_ENTRY(175), - IAVF_PTT_UNUSED_ENTRY(176), - IAVF_PTT_UNUSED_ENTRY(177), - IAVF_PTT_UNUSED_ENTRY(178), - IAVF_PTT_UNUSED_ENTRY(179), - - IAVF_PTT_UNUSED_ENTRY(180), - IAVF_PTT_UNUSED_ENTRY(181), - IAVF_PTT_UNUSED_ENTRY(182), - IAVF_PTT_UNUSED_ENTRY(183), - IAVF_PTT_UNUSED_ENTRY(184), - IAVF_PTT_UNUSED_ENTRY(185), - IAVF_PTT_UNUSED_ENTRY(186), - IAVF_PTT_UNUSED_ENTRY(187), - IAVF_PTT_UNUSED_ENTRY(188), - IAVF_PTT_UNUSED_ENTRY(189), - - IAVF_PTT_UNUSED_ENTRY(190), - IAVF_PTT_UNUSED_ENTRY(191), - IAVF_PTT_UNUSED_ENTRY(192), - IAVF_PTT_UNUSED_ENTRY(193), - IAVF_PTT_UNUSED_ENTRY(194), - IAVF_PTT_UNUSED_ENTRY(195), - IAVF_PTT_UNUSED_ENTRY(196), - IAVF_PTT_UNUSED_ENTRY(197), - IAVF_PTT_UNUSED_ENTRY(198), - IAVF_PTT_UNUSED_ENTRY(199), - - IAVF_PTT_UNUSED_ENTRY(200), - IAVF_PTT_UNUSED_ENTRY(201), - IAVF_PTT_UNUSED_ENTRY(202), - IAVF_PTT_UNUSED_ENTRY(203), - IAVF_PTT_UNUSED_ENTRY(204), - IAVF_PTT_UNUSED_ENTRY(205), - IAVF_PTT_UNUSED_ENTRY(206), - IAVF_PTT_UNUSED_ENTRY(207), - IAVF_PTT_UNUSED_ENTRY(208), - 
IAVF_PTT_UNUSED_ENTRY(209), - - IAVF_PTT_UNUSED_ENTRY(210), - IAVF_PTT_UNUSED_ENTRY(211), - IAVF_PTT_UNUSED_ENTRY(212), - IAVF_PTT_UNUSED_ENTRY(213), - IAVF_PTT_UNUSED_ENTRY(214), - IAVF_PTT_UNUSED_ENTRY(215), - IAVF_PTT_UNUSED_ENTRY(216), - IAVF_PTT_UNUSED_ENTRY(217), - IAVF_PTT_UNUSED_ENTRY(218), - IAVF_PTT_UNUSED_ENTRY(219), - - IAVF_PTT_UNUSED_ENTRY(220), - IAVF_PTT_UNUSED_ENTRY(221), - IAVF_PTT_UNUSED_ENTRY(222), - IAVF_PTT_UNUSED_ENTRY(223), - IAVF_PTT_UNUSED_ENTRY(224), - IAVF_PTT_UNUSED_ENTRY(225), - IAVF_PTT_UNUSED_ENTRY(226), - IAVF_PTT_UNUSED_ENTRY(227), - IAVF_PTT_UNUSED_ENTRY(228), - IAVF_PTT_UNUSED_ENTRY(229), - - IAVF_PTT_UNUSED_ENTRY(230), - IAVF_PTT_UNUSED_ENTRY(231), - IAVF_PTT_UNUSED_ENTRY(232), - IAVF_PTT_UNUSED_ENTRY(233), - IAVF_PTT_UNUSED_ENTRY(234), - IAVF_PTT_UNUSED_ENTRY(235), - IAVF_PTT_UNUSED_ENTRY(236), - IAVF_PTT_UNUSED_ENTRY(237), - IAVF_PTT_UNUSED_ENTRY(238), - IAVF_PTT_UNUSED_ENTRY(239), - - IAVF_PTT_UNUSED_ENTRY(240), - IAVF_PTT_UNUSED_ENTRY(241), - IAVF_PTT_UNUSED_ENTRY(242), - IAVF_PTT_UNUSED_ENTRY(243), - IAVF_PTT_UNUSED_ENTRY(244), - IAVF_PTT_UNUSED_ENTRY(245), - IAVF_PTT_UNUSED_ENTRY(246), - IAVF_PTT_UNUSED_ENTRY(247), - IAVF_PTT_UNUSED_ENTRY(248), - IAVF_PTT_UNUSED_ENTRY(249), - - IAVF_PTT_UNUSED_ENTRY(250), - IAVF_PTT_UNUSED_ENTRY(251), - IAVF_PTT_UNUSED_ENTRY(252), - IAVF_PTT_UNUSED_ENTRY(253), - IAVF_PTT_UNUSED_ENTRY(254), - IAVF_PTT_UNUSED_ENTRY(255) + [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; /** diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 84c3d8d97ef6..a056e1545615 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -278,35 +278,46 @@ static int iavf_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(cmd, supported); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = PORT_NONE; - /* Set speed and duplex */ + cmd->base.duplex = DUPLEX_FULL; + + if (ADV_LINK_SUPPORT(adapter)) { + if (adapter->link_speed_mbps && + adapter->link_speed_mbps < U32_MAX) + cmd->base.speed = adapter->link_speed_mbps; + else + cmd->base.speed = SPEED_UNKNOWN; + + return 0; + } + switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: + case VIRTCHNL_LINK_SPEED_40GB: cmd->base.speed = SPEED_40000; break; - case IAVF_LINK_SPEED_25GB: -#ifdef SPEED_25000 + case VIRTCHNL_LINK_SPEED_25GB: cmd->base.speed = SPEED_25000; -#else - netdev_info(netdev, - "Speed is 25G, display not supported by this version of ethtool.\n"); -#endif break; - case IAVF_LINK_SPEED_20GB: + case VIRTCHNL_LINK_SPEED_20GB: cmd->base.speed = SPEED_20000; break; - case IAVF_LINK_SPEED_10GB: + case VIRTCHNL_LINK_SPEED_10GB: cmd->base.speed = SPEED_10000; break; - case IAVF_LINK_SPEED_1GB: + case VIRTCHNL_LINK_SPEED_5GB: + cmd->base.speed = SPEED_5000; + break; + case VIRTCHNL_LINK_SPEED_2_5GB: + cmd->base.speed = SPEED_2500; + break; + case VIRTCHNL_LINK_SPEED_1GB: cmd->base.speed = SPEED_1000; break; - case IAVF_LINK_SPEED_100MB: + case VIRTCHNL_LINK_SPEED_100MB: cmd->base.speed = SPEED_100; break; default: break; } - cmd->base.duplex = DUPLEX_FULL; return 0; } @@ -320,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev, **/ static int iavf_get_sset_count(struct net_device *netdev, int sset) { + /* Report the maximum number queues, even if not every queue is + * currently configured. Since allocation of queues is in pairs, + * use netdev->real_num_tx_queues * 2. 
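The iavf_common.c change leans on C99 designated array initializers plus the GNU range extension, which is what lets the hundred-odd unused-entry lines collapse into one: unnamed elements are implicitly zeroed, and [A ... B] fills a whole span. A standalone demonstration (GCC/Clang):

    #include <stdio.h>

    /* Designated initializers with range syntax, as now used for the
     * ptype table: elements not named are zero, so unused entries need
     * no explicit lines at all. */
    static const int lookup[256] = {
        [1]           = 42,
        [154 ... 255] = -1,   /* GNU extension: range initializer */
    };

    int main(void)
    {
        printf("%d %d %d\n", lookup[0], lookup[1], lookup[200]); /* 0 42 -1 */
        return 0;
    }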
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 84c3d8d97ef6..a056e1545615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -278,35 +278,46 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
 	cmd->base.autoneg = AUTONEG_DISABLE;
 	cmd->base.port = PORT_NONE;
-	/* Set speed and duplex */
+	cmd->base.duplex = DUPLEX_FULL;
+
+	if (ADV_LINK_SUPPORT(adapter)) {
+		if (adapter->link_speed_mbps &&
+		    adapter->link_speed_mbps < U32_MAX)
+			cmd->base.speed = adapter->link_speed_mbps;
+		else
+			cmd->base.speed = SPEED_UNKNOWN;
+
+		return 0;
+	}
+
 	switch (adapter->link_speed) {
-	case IAVF_LINK_SPEED_40GB:
+	case VIRTCHNL_LINK_SPEED_40GB:
 		cmd->base.speed = SPEED_40000;
 		break;
-	case IAVF_LINK_SPEED_25GB:
-#ifdef SPEED_25000
+	case VIRTCHNL_LINK_SPEED_25GB:
 		cmd->base.speed = SPEED_25000;
-#else
-		netdev_info(netdev,
-			    "Speed is 25G, display not supported by this version of ethtool.\n");
-#endif
 		break;
-	case IAVF_LINK_SPEED_20GB:
+	case VIRTCHNL_LINK_SPEED_20GB:
 		cmd->base.speed = SPEED_20000;
 		break;
-	case IAVF_LINK_SPEED_10GB:
+	case VIRTCHNL_LINK_SPEED_10GB:
 		cmd->base.speed = SPEED_10000;
 		break;
-	case IAVF_LINK_SPEED_1GB:
+	case VIRTCHNL_LINK_SPEED_5GB:
+		cmd->base.speed = SPEED_5000;
+		break;
+	case VIRTCHNL_LINK_SPEED_2_5GB:
+		cmd->base.speed = SPEED_2500;
+		break;
+	case VIRTCHNL_LINK_SPEED_1GB:
 		cmd->base.speed = SPEED_1000;
 		break;
-	case IAVF_LINK_SPEED_100MB:
+	case VIRTCHNL_LINK_SPEED_100MB:
 		cmd->base.speed = SPEED_100;
 		break;
 	default:
 		break;
 	}
-	cmd->base.duplex = DUPLEX_FULL;
 
 	return 0;
 }
@@ -320,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
 **/
 static int iavf_get_sset_count(struct net_device *netdev, int sset)
 {
+	/* Report the maximum number of queues, even if not every queue is
+	 * currently configured. Since allocation of queues is in pairs,
+	 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
+	 * at device creation and never changes.
+	 */
+
 	if (sset == ETH_SS_STATS)
 		return IAVF_STATS_LEN +
-			(IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
+			(IAVF_QUEUE_STATS_LEN * 2 *
+			 netdev->real_num_tx_queues);
 	else if (sset == ETH_SS_PRIV_FLAGS)
 		return IAVF_PRIV_FLAGS_STR_LEN;
 	else
@@ -343,20 +361,24 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 	unsigned int i;
 
+	/* Explicitly request stats refresh */
+	iavf_schedule_request_stats(adapter);
+
 	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
 
 	rcu_read_lock();
-	for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
+	/* As num_active_queues describes both tx and rx queues, we can use
+	 * it to iterate over rings' stats.
+	 */
+	for (i = 0; i < adapter->num_active_queues; i++) {
 		struct iavf_ring *ring;
 
-		/* Avoid accessing un-allocated queues */
-		ring = (i < adapter->num_active_queues ?
-			&adapter->tx_rings[i] : NULL);
+		/* Tx rings stats */
+		ring = &adapter->tx_rings[i];
 		iavf_add_queue_stats(&data, ring);
 
-		/* Avoid accessing un-allocated queues */
-		ring = (i < adapter->num_active_queues ?
-			&adapter->rx_rings[i] : NULL);
+		/* Rx rings stats */
+		ring = &adapter->rx_rings[i];
 		iavf_add_queue_stats(&data, ring);
 	}
 	rcu_read_unlock();
@@ -393,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
 
 	iavf_add_stat_strings(&data, iavf_gstrings_stats);
 
-	/* Queues are always allocated in pairs, so we just use num_tx_queues
-	 * for both Tx and Rx queues.
+	/* Queues are always allocated in pairs, so we just use
+	 * real_num_tx_queues for both Tx and Rx queues.
 	 */
-	for (i = 0; i < netdev->num_tx_queues; i++) {
+	for (i = 0; i < netdev->real_num_tx_queues; i++) {
 		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
 				      "tx", i);
 		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
@@ -559,10 +581,9 @@ static void iavf_get_drvinfo(struct net_device *netdev,
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 
-	strlcpy(drvinfo->driver, iavf_driver_name, 32);
-	strlcpy(drvinfo->version, iavf_driver_version, 32);
-	strlcpy(drvinfo->fw_version, "N/A", 4);
-	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strscpy(drvinfo->driver, iavf_driver_name, 32);
+	strscpy(drvinfo->fw_version, "N/A", 4);
+	strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
 	drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
 }
 
@@ -570,12 +591,16 @@ static void iavf_get_drvinfo(struct net_device *netdev,
 * iavf_get_ringparam - Get ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 **/
 static void iavf_get_ringparam(struct net_device *netdev,
-			       struct ethtool_ringparam *ring)
+			       struct ethtool_ringparam *ring,
+			       struct kernel_ethtool_ringparam *kernel_ring,
+			       struct netlink_ext_ack *extack)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 
@@ -589,12 +614,16 @@ static void iavf_get_ringparam(struct net_device *netdev,
 * iavf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 **/
 static int iavf_set_ringparam(struct net_device *netdev,
-			      struct ethtool_ringparam *ring)
+			      struct ethtool_ringparam *ring,
+			      struct kernel_ethtool_ringparam *kernel_ring,
+			      struct netlink_ext_ack *extack)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 	u32 new_rx_count, new_tx_count;
@@ -602,23 +631,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_tx_count = clamp_t(u32, ring->tx_pending,
-			       IAVF_MIN_TXD,
-			       IAVF_MAX_TXD);
-	new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+	if (ring->tx_pending > IAVF_MAX_TXD ||
+	    ring->tx_pending < IAVF_MIN_TXD ||
+	    ring->rx_pending > IAVF_MAX_RXD ||
+	    ring->rx_pending < IAVF_MIN_RXD) {
+		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+			   ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+			   IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+		return -EINVAL;
+	}
+
+	new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+	if (new_tx_count != ring->tx_pending)
+		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+			    new_tx_count);
 
-	new_rx_count = clamp_t(u32, ring->rx_pending,
-			       IAVF_MIN_RXD,
-			       IAVF_MAX_RXD);
-	new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+	new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+	if (new_rx_count != ring->rx_pending)
+		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+			    new_rx_count);
 
 	/* if nothing to do return success */
 	if ((new_tx_count == adapter->tx_desc_count) &&
-	    (new_rx_count == adapter->rx_desc_count))
+	    (new_rx_count == adapter->rx_desc_count)) {
+		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
 		return 0;
+	}
 
-	adapter->tx_desc_count = new_tx_count;
-	adapter->rx_desc_count = new_rx_count;
+	if (new_tx_count != adapter->tx_desc_count) {
+		netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+			   adapter->tx_desc_count, new_tx_count);
+		adapter->tx_desc_count = new_tx_count;
+	}
+
+	if (new_rx_count != adapter->rx_desc_count) {
+		netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+			   adapter->rx_desc_count, new_rx_count);
+		adapter->rx_desc_count = new_rx_count;
+	}
 
 	if (netif_running(netdev)) {
 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
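The reworked iavf_set_ringparam() now rejects out-of-range requests outright and only rounds up to the descriptor increment (32 in the driver, assumed below) instead of silently clamping. The rounding itself is the standard align-up idiom:

    #include <stdio.h>

    /* Same round-up the new ringparam path performs after range
     * validation; ALIGN() in the kernel does the equivalent. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
        unsigned int requested = 500;
        unsigned int rounded = ALIGN_UP(requested, 32);

        printf("%u -> %u\n", requested, rounded); /* 500 -> 512 */
        return 0;
    }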
@@ -642,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec, int queue)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
-	struct iavf_vsi *vsi = &adapter->vsi;
 	struct iavf_ring *rx_ring, *tx_ring;
 
-	ec->tx_max_coalesced_frames = vsi->work_limit;
-	ec->rx_max_coalesced_frames = vsi->work_limit;
-
 	/* Rx and Tx usecs per queue value. If user doesn't specify the
 	 * queue, return queue 0's value to represent.
 	 */
@@ -675,6 +721,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
 * iavf_get_coalesce - Get interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
 *
 * Returns current coalescing settings. This is referred to elsewhere in the
 * driver as Interrupt Throttle Rate, as this is how the hardware describes
@@ -682,7 +730,9 @@ static int __iavf_get_coalesce(struct net_device *netdev,
 * only represents the settings of queue 0.
 **/
 static int iavf_get_coalesce(struct net_device *netdev,
-			     struct ethtool_coalesce *ec)
+			     struct ethtool_coalesce *ec,
+			     struct kernel_ethtool_coalesce *kernel_coal,
+			     struct netlink_ext_ack *extack)
 {
 	return __iavf_get_coalesce(netdev, ec, -1);
 }
@@ -709,12 +759,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
 *
 * Change the ITR settings for a specific queue.
 **/
-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
-				   struct ethtool_coalesce *ec, int queue)
+static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
+				  struct ethtool_coalesce *ec, int queue)
 {
 	struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
 	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
 	struct iavf_q_vector *q_vector;
+	u16 itr_setting;
+
+	itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+	if (ec->rx_coalesce_usecs != itr_setting &&
+	    ec->use_adaptive_rx_coalesce) {
+		netif_info(adapter, drv, adapter->netdev,
+			   "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
+		return -EINVAL;
+	}
+
+	itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+	if (ec->tx_coalesce_usecs != itr_setting &&
+	    ec->use_adaptive_tx_coalesce) {
+		netif_info(adapter, drv, adapter->netdev,
+			   "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+		return -EINVAL;
+	}
 
 	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
 	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
@@ -737,6 +806,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
 	 * the Tx and Rx ITR values based on the values we have entered
 	 * into the q_vector, no need to write the values now.
 	 */
+	return 0;
 }
 
 /**
@@ -751,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec, int queue)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
-	struct iavf_vsi *vsi = &adapter->vsi;
 	int i;
 
-	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
-		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
 	if (ec->rx_coalesce_usecs == 0) {
 		if (ec->use_adaptive_rx_coalesce)
 			netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
@@ -778,9 +844,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
 	 */
 	if (queue < 0) {
 		for (i = 0; i < adapter->num_active_queues; i++)
-			iavf_set_itr_per_queue(adapter, ec, i);
+			if (iavf_set_itr_per_queue(adapter, ec, i))
+				return -EINVAL;
 	} else if (queue < adapter->num_active_queues) {
-		iavf_set_itr_per_queue(adapter, ec, queue);
+		if (iavf_set_itr_per_queue(adapter, ec, queue))
+			return -EINVAL;
 	} else {
 		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
			   adapter->num_active_queues - 1);
@@ -794,11 +862,15 @@ static int __iavf_set_coalesce(struct net_device *netdev,
 * iavf_set_coalesce - Set interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
 *
 * Change current coalescing settings for every queue.
 **/
 static int iavf_set_coalesce(struct net_device *netdev,
-			     struct ethtool_coalesce *ec)
+			     struct ethtool_coalesce *ec,
+			     struct kernel_ethtool_coalesce *kernel_coal,
+			     struct netlink_ext_ack *extack)
 {
 	return __iavf_set_coalesce(netdev, ec, -1);
 }
@@ -818,6 +890,870 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
 }
 
 /**
+ * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
+ * flow type values
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
+{
+	switch (flow) {
+	case IAVF_FDIR_FLOW_IPV4_TCP:
+		return TCP_V4_FLOW;
+	case IAVF_FDIR_FLOW_IPV4_UDP:
+		return UDP_V4_FLOW;
+	case IAVF_FDIR_FLOW_IPV4_SCTP:
+		return SCTP_V4_FLOW;
+	case IAVF_FDIR_FLOW_IPV4_AH:
+		return AH_V4_FLOW;
+	case IAVF_FDIR_FLOW_IPV4_ESP:
+		return ESP_V4_FLOW;
+	case IAVF_FDIR_FLOW_IPV4_OTHER:
+		return IPV4_USER_FLOW;
+	case IAVF_FDIR_FLOW_IPV6_TCP:
+		return TCP_V6_FLOW;
+	case IAVF_FDIR_FLOW_IPV6_UDP:
+		return UDP_V6_FLOW;
+	case IAVF_FDIR_FLOW_IPV6_SCTP:
+		return SCTP_V6_FLOW;
+	case IAVF_FDIR_FLOW_IPV6_AH:
+		return AH_V6_FLOW;
+	case IAVF_FDIR_FLOW_IPV6_ESP:
+		return ESP_V6_FLOW;
+	case IAVF_FDIR_FLOW_IPV6_OTHER:
+		return IPV6_USER_FLOW;
+	case IAVF_FDIR_FLOW_NON_IP_L2:
+		return ETHER_FLOW;
+	default:
+		/* 0 is undefined ethtool flow */
+		return 0;
+	}
+}
+
+/**
+ * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
+{
+	switch (eth) {
+	case TCP_V4_FLOW:
+		return IAVF_FDIR_FLOW_IPV4_TCP;
+	case UDP_V4_FLOW:
+		return IAVF_FDIR_FLOW_IPV4_UDP;
+	case SCTP_V4_FLOW:
+		return IAVF_FDIR_FLOW_IPV4_SCTP;
+	case AH_V4_FLOW:
+		return IAVF_FDIR_FLOW_IPV4_AH;
+	case ESP_V4_FLOW:
+		return IAVF_FDIR_FLOW_IPV4_ESP;
+	case IPV4_USER_FLOW:
+		return IAVF_FDIR_FLOW_IPV4_OTHER;
+	case TCP_V6_FLOW:
+		return IAVF_FDIR_FLOW_IPV6_TCP;
+	case UDP_V6_FLOW:
+		return IAVF_FDIR_FLOW_IPV6_UDP;
+	case SCTP_V6_FLOW:
+		return IAVF_FDIR_FLOW_IPV6_SCTP;
+	case AH_V6_FLOW:
+		return IAVF_FDIR_FLOW_IPV6_AH;
+	case ESP_V6_FLOW:
+		return IAVF_FDIR_FLOW_IPV6_ESP;
+	case IPV6_USER_FLOW:
+		return IAVF_FDIR_FLOW_IPV6_OTHER;
+	case ETHER_FLOW:
+		return IAVF_FDIR_FLOW_NON_IP_L2;
+	default:
+		return IAVF_FDIR_FLOW_NONE;
+	}
+}
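The two converters above are intended to be exact inverses on every supported flow type; a quick self-check sketch one could drop into a test (hypothetical helper, using the kernel's ARRAY_SIZE() macro):

    static bool iavf_flow_map_roundtrip_ok(void)
    {
        static const enum iavf_fdir_flow_type flows[] = {
            IAVF_FDIR_FLOW_IPV4_TCP, IAVF_FDIR_FLOW_IPV6_UDP,
            IAVF_FDIR_FLOW_NON_IP_L2,
        };
        unsigned int i;

        /* ethtool -> fltr -> ethtool must reproduce the input */
        for (i = 0; i < ARRAY_SIZE(flows); i++)
            if (iavf_ethtool_flow_to_fltr(iavf_fltr_to_ethtool_flow(flows[i])) != flows[i])
                return false;
        return true;
    }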
+
+/**
+ * iavf_is_mask_valid - check mask field set
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * If the mask is fully set return true. If it is not valid for field return
+ * false.
+ */
+static bool iavf_is_mask_valid(u64 mask, u64 field)
+{
+	return (mask & field) == field;
+}
+
+/**
+ * iavf_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter for userdef data storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+			     struct iavf_fdir_fltr *fltr)
+{
+	struct iavf_flex_word *flex;
+	int i, cnt = 0;
+
+	if (!(fsp->flow_type & FLOW_EXT))
+		return 0;
+
+	for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
+#define IAVF_USERDEF_FLEX_WORD_M	GENMASK(15, 0)
+#define IAVF_USERDEF_FLEX_OFFS_S	16
+#define IAVF_USERDEF_FLEX_OFFS_M	GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
+#define IAVF_USERDEF_FLEX_FLTR_M	GENMASK(31, 0)
+		u32 value = be32_to_cpu(fsp->h_ext.data[i]);
+		u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
+
+		if (!value || !mask)
+			continue;
+
+		if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
+			return -EINVAL;
+
+		/* 504 is the maximum value for offsets, and offset is measured
+		 * from the start of the MAC address.
+		 */
+#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
+		flex = &fltr->flex_words[cnt++];
+		flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
+		flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
+			       IAVF_USERDEF_FLEX_OFFS_S;
+		if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
+			return -EINVAL;
+	}
+
+	fltr->flex_cnt = cnt;
+
+	return 0;
+}
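The user-def layout parsed above packs a 16-bit match word into the low half of each u32 and a byte offset (measured from the start of the MAC header, capped at 504) into the high half. Unpacking one example value by hand, using the masks defined in the parser:

    u32 value = 0x01f08035;                       /* example input */
    u16 word  = value & IAVF_USERDEF_FLEX_WORD_M; /* 0x8035: match word */
    u16 offs  = (value & IAVF_USERDEF_FLEX_OFFS_M) >> IAVF_USERDEF_FLEX_OFFS_S;
                                                  /* 0x01f0 = 496 <= 504, valid */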
+
+/**
+ * iavf_fill_rx_flow_ext_data - fill the additional data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter to get additional data
+ */
+static void
+iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
+			   struct iavf_fdir_fltr *fltr)
+{
+	if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
+		return;
+
+	fsp->flow_type |= FLOW_EXT;
+
+	memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
+	memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
+}
+
+/**
+ * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
+ * @adapter: the VF adapter structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
+			    struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct iavf_fdir_fltr *rule = NULL;
+	int ret = 0;
+
+	if (!FDIR_FLTR_SUPPORT(adapter))
+		return -EOPNOTSUPP;
+
+	spin_lock_bh(&adapter->fdir_fltr_lock);
+
+	rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+	if (!rule) {
+		ret = -EINVAL;
+		goto release_lock;
+	}
+
+	fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
+
+	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+	switch (fsp->flow_type) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+		fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
+		fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
+		fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
+		fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+		fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+		fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
+		fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
+		fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
+		break;
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+		fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+		fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+		fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
+		fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
+		fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+		fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+		fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
+		fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
+		break;
+	case IPV4_USER_FLOW:
+		fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
+		fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
+		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+		fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
+		fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+		fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
+		fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
+		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+		fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
+		break;
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+		       sizeof(struct in6_addr));
+		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+		       sizeof(struct in6_addr));
+		fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
+		fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
+		fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
+		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+		       sizeof(struct in6_addr));
+		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+		       sizeof(struct in6_addr));
+		fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
+		fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
+		fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
+		break;
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+		memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+		       sizeof(struct in6_addr));
+		memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+		       sizeof(struct in6_addr));
+		fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
+		fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
+		memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+		       sizeof(struct in6_addr));
+		memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+		       sizeof(struct in6_addr));
+		fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
+		fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
+		break;
+	case IPV6_USER_FLOW:
+		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+		       sizeof(struct in6_addr));
+		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+		       sizeof(struct in6_addr));
+		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
+		fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
+		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
+		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+		       sizeof(struct in6_addr));
+		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+		       sizeof(struct in6_addr));
+		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
+		fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
+		fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
+		break;
+	case ETHER_FLOW:
+		fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
+		fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	iavf_fill_rx_flow_ext_data(fsp, rule);
+
+	if (rule->action == VIRTCHNL_ACTION_DROP)
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
+	else
+		fsp->ring_cookie = rule->q_index;
+
+release_lock:
+	spin_unlock_bh(&adapter->fdir_fltr_lock);
+	return ret;
+}
+
+/**
+ * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @adapter: the VF adapter structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
+		       u32 *rule_locs)
+{
+	struct iavf_fdir_fltr *fltr;
+	unsigned int cnt = 0;
+	int val = 0;
+
+	if (!FDIR_FLTR_SUPPORT(adapter))
+		return -EOPNOTSUPP;
+
+	cmd->data = IAVF_MAX_FDIR_FILTERS;
+
+	spin_lock_bh(&adapter->fdir_fltr_lock);
+
+	list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+		if (cnt == cmd->rule_cnt) {
+			val = -EMSGSIZE;
+			goto release_lock;
+		}
+		rule_locs[cnt] = fltr->loc;
+		cnt++;
+	}
+
+release_lock:
+	spin_unlock_bh(&adapter->fdir_fltr_lock);
+	if (!val)
+		cmd->rule_cnt = cnt;
+
+	return val;
+}
+
+/**
+ * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: filter structure
+ */
+static int
+iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
+			struct iavf_fdir_fltr *fltr)
+{
+	u32 flow_type, q_index = 0;
+	enum virtchnl_action act;
+	int err;
+
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		act = VIRTCHNL_ACTION_DROP;
+	} else {
+		q_index = fsp->ring_cookie;
+		if (q_index >= adapter->num_active_queues)
+			return -EINVAL;
+
+		act = VIRTCHNL_ACTION_QUEUE;
+	}
+
+	fltr->action = act;
+	fltr->loc = fsp->location;
+	fltr->q_index = q_index;
+
+	if (fsp->flow_type & FLOW_EXT) {
+		memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
+		       sizeof(fltr->ext_data.usr_def));
+		memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
+		       sizeof(fltr->ext_mask.usr_def));
+	}
+
+	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+	fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
+
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+		fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+		fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+		fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
+		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+		break;
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
+		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
+		fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
+		fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
+		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
+		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+		break;
+	case IPV4_USER_FLOW:
+		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+		fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+		fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
+		fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
+		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+		break;
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+		       sizeof(struct in6_addr));
+		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+		       sizeof(struct in6_addr));
+		fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+		fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+		fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
+		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+		       sizeof(struct in6_addr));
+		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+		       sizeof(struct in6_addr));
+		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+		break;
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
+		       sizeof(struct in6_addr));
+		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
+		       sizeof(struct in6_addr));
+		fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
+		fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
+		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
+		       sizeof(struct in6_addr));
+		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
+		       sizeof(struct in6_addr));
+		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+		break;
+	case IPV6_USER_FLOW:
+		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+		       sizeof(struct in6_addr));
+		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+		       sizeof(struct in6_addr));
+		fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+		fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
+		fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+		       sizeof(struct in6_addr));
+		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+		       sizeof(struct in6_addr));
+		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+		break;
+	case ETHER_FLOW:
+		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+		fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
+		break;
+	default:
+		/* not doing un-parsed flow types */
+		return -EINVAL;
+	}
+
+	if (iavf_fdir_is_dup_fltr(adapter, fltr))
+		return -EEXIST;
+
+	err = iavf_parse_rx_flow_user_data(fsp, fltr);
+	if (err)
+		return err;
+
+	return iavf_fill_fdir_add_msg(adapter, fltr);
+}
+
+/**
+ * iavf_add_fdir_ethtool - add Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to add Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec
*fsp = &cmd->fs; + struct iavf_fdir_fltr *fltr; + int count = 50; + int err; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + if (fsp->flow_type & FLOW_MAC_EXT) + return -EINVAL; + + if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { + dev_err(&adapter->pdev->dev, + "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", + IAVF_MAX_FDIR_FILTERS); + return -ENOSPC; + } + + spin_lock_bh(&adapter->fdir_fltr_lock); + if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { + dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); + spin_unlock_bh(&adapter->fdir_fltr_lock); + return -EEXIST; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) + return -ENOMEM; + + while (!mutex_trylock(&adapter->crit_lock)) { + if (--count == 0) { + kfree(fltr); + return -EINVAL; + } + udelay(1); + } + + err = iavf_add_fdir_fltr_info(adapter, fsp, fltr); + if (err) + goto ret; + + spin_lock_bh(&adapter->fdir_fltr_lock); + iavf_fdir_list_add_fltr(adapter, fltr); + adapter->fdir_active_fltr++; + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + spin_unlock_bh(&adapter->fdir_fltr_lock); + + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + +ret: + if (err && fltr) + kfree(fltr); + + mutex_unlock(&adapter->crit_lock); + return err; +} + +/** + * iavf_del_fdir_ethtool - delete Flow Director filter + * @adapter: pointer to the VF adapter structure + * @cmd: command to delete Flow Director filter + * + * Returns 0 on success and negative values for failure + */ +static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct iavf_fdir_fltr *fltr = NULL; + int err = 0; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + spin_lock_bh(&adapter->fdir_fltr_lock); + fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + if (fltr) { + if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { + fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + } else { + err = -EBUSY; + } + } else if (adapter->fdir_active_fltr) { + err = -EINVAL; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + + return err; +} + +/** + * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input + * @cmd: ethtool rxnfc command + * + * This function parses the rxnfc command and returns intended + * header types for RSS configuration + */ +static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) +{ + u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case UDP_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case SCTP_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case TCP_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case UDP_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case SCTP_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + default: + break; + } + + return hdrs; +} + +/** + * 
iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input + * @cmd: ethtool rxnfc command + * + * This function parses the rxnfc command and returns intended hash fields for + * RSS configuration + */ +static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd) +{ + u64 hfld = IAVF_ADV_RSS_HASH_INVALID; + + if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) { + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + if (cmd->data & RXH_IP_SRC) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA; + if (cmd->data & RXH_IP_DST) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + if (cmd->data & RXH_IP_SRC) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA; + if (cmd->data & RXH_IP_DST) + hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA; + break; + default: + break; + } + } + + if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) { + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (cmd->data & RXH_L4_B_0_1) + hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT; + if (cmd->data & RXH_L4_B_2_3) + hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (cmd->data & RXH_L4_B_0_1) + hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT; + if (cmd->data & RXH_L4_B_2_3) + hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if (cmd->data & RXH_L4_B_0_1) + hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT; + if (cmd->data & RXH_L4_B_2_3) + hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT; + break; + default: + break; + } + } + + return hfld; +} + +/** + * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash + * @adapter: pointer to the VF adapter structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow input set is supported. 
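iavf_adv_rss_parse_hash_flds() above is a pure translation from the RXH_* bits in cmd->data to the driver's IAVF_ADV_RSS_HASH_FLD_* flags, keyed by flow type. The pairs it produces for TCP_V4_FLOW, laid out as an illustrative table (the array itself is not part of the patch):

/* Illustrative only: the RXH_* bit tested in cmd->data and the
 * hash-field flag the parser ORs into its return value for TCP/IPv4.
 */
static const struct {
	u64 rxh_bit;
	u64 iavf_fld;
} tcp4_hash_map[] = {
	{ RXH_IP_SRC,   IAVF_ADV_RSS_HASH_FLD_IPV4_SA },
	{ RXH_IP_DST,   IAVF_ADV_RSS_HASH_FLD_IPV4_DA },
	{ RXH_L4_B_0_1, IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT },
	{ RXH_L4_B_2_3, IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT },
};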
+ */ +static int +iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct iavf_adv_rss *rss_old, *rss_new; + bool rss_new_add = false; + int count = 50, err = 0; + u64 hash_flds; + u32 hdrs; + + if (!ADV_RSS_SUPPORT(adapter)) + return -EOPNOTSUPP; + + hdrs = iavf_adv_rss_parse_hdrs(cmd); + if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) + return -EINVAL; + + hash_flds = iavf_adv_rss_parse_hash_flds(cmd); + if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) + return -EINVAL; + + rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL); + if (!rss_new) + return -ENOMEM; + + if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) { + kfree(rss_new); + return -EINVAL; + } + + while (!mutex_trylock(&adapter->crit_lock)) { + if (--count == 0) { + kfree(rss_new); + return -EINVAL; + } + + udelay(1); + } + + spin_lock_bh(&adapter->adv_rss_lock); + rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); + if (rss_old) { + if (rss_old->state != IAVF_ADV_RSS_ACTIVE) { + err = -EBUSY; + } else if (rss_old->hash_flds != hash_flds) { + rss_old->state = IAVF_ADV_RSS_ADD_REQUEST; + rss_old->hash_flds = hash_flds; + memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg, + sizeof(rss_new->cfg_msg)); + adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; + } else { + err = -EEXIST; + } + } else { + rss_new_add = true; + rss_new->state = IAVF_ADV_RSS_ADD_REQUEST; + rss_new->packet_hdrs = hdrs; + rss_new->hash_flds = hash_flds; + list_add_tail(&rss_new->list, &adapter->adv_rss_list_head); + adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (!err) + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + + mutex_unlock(&adapter->crit_lock); + + if (!rss_new_add) + kfree(rss_new); + + return err; +} + +/** + * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type + * @adapter: pointer to the VF adapter structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow input set is supported. + */ +static int +iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct iavf_adv_rss *rss; + u64 hash_flds; + u32 hdrs; + + if (!ADV_RSS_SUPPORT(adapter)) + return -EOPNOTSUPP; + + cmd->data = 0; + + hdrs = iavf_adv_rss_parse_hdrs(cmd); + if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) + return -EINVAL; + + spin_lock_bh(&adapter->adv_rss_lock); + rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); + if (rss) + hash_flds = rss->hash_flds; + else + hash_flds = IAVF_ADV_RSS_HASH_INVALID; + spin_unlock_bh(&adapter->adv_rss_lock); + + if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) + return -EINVAL; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA | + IAVF_ADV_RSS_HASH_FLD_IPV6_SA)) + cmd->data |= (u64)RXH_IP_SRC; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA | + IAVF_ADV_RSS_HASH_FLD_IPV6_DA)) + cmd->data |= (u64)RXH_IP_DST; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT | + IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT | + IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)) + cmd->data |= (u64)RXH_L4_B_0_1; + + if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT | + IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | + IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) + cmd->data |= (u64)RXH_L4_B_2_3; + + return 0; +} + +/** + * iavf_set_rxnfc - command to set Rx flow rules. 
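A hedged user-space sketch of the request that reaches iavf_set_adv_rss_hash_opt(): hash TCP/IPv4 flows on addresses and ports, the equivalent of "ethtool -N <ifname> rx-flow-hash tcp4 sdfn". The function name is illustrative, and fd is assumed to be an ordinary AF_INET socket.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Issue ETHTOOL_SRXFH for TCP_V4_FLOW; fd is any AF_INET socket. */
static int set_tcp4_hash(int fd, const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXFH;
	nfc.flow_type = TCP_V4_FLOW;
	nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	return ioctl(fd, SIOCETHTOOL, &ifr);
}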
+ * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns 0 for success and negative values for errors + */ +static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = iavf_add_fdir_ethtool(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = iavf_del_fdir_ethtool(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = iavf_set_adv_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +/** * iavf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -836,9 +1772,21 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, cmd->data = adapter->num_active_queues; ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + if (!FDIR_FLTR_SUPPORT(adapter)) + break; + cmd->rule_cnt = adapter->fdir_active_fltr; + cmd->data = IAVF_MAX_FDIR_FILTERS; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = iavf_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); + break; case ETHTOOL_GRXFH: - netdev_info(netdev, - "RSS hash info is not available to vf, use pf.\n"); + ret = iavf_get_adv_rss_hash_opt(adapter, cmd); break; default: break; @@ -860,7 +1808,7 @@ static void iavf_get_channels(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = IAVF_MAX_REQ_QUEUES; + ch->max_combined = adapter->vsi_res->num_queue_pairs; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; @@ -881,14 +1829,8 @@ static int iavf_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct iavf_adapter *adapter = netdev_priv(netdev); - int num_req = ch->combined_count; - - if (num_req != adapter->num_active_queues && - !(adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { - dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); - return -EINVAL; - } + u32 num_req = ch->combined_count; + int i; if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { @@ -899,14 +1841,33 @@ static int iavf_set_channels(struct net_device *netdev, /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. 
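The ETHTOOL_GRXCLSRLCNT/GRXCLSRLALL pair wired into iavf_get_rxnfc() here follows the usual two-step contract: query the rule count first, then pass an array at least that large, since iavf_get_fdir_fltr_ids() returns -EMSGSIZE when rule_cnt is too small. A sketch continuing the assumptions of the previous example (fd and ifr prepared the same way; get_fdir_rule_ids() is illustrative):

#include <stdlib.h>

/* Two-step enumeration of Flow Director rule IDs. */
static struct ethtool_rxnfc *get_fdir_rule_ids(int fd, struct ifreq *ifr)
{
	struct ethtool_rxnfc cnt = { .cmd = ETHTOOL_GRXCLSRLCNT };
	struct ethtool_rxnfc *list;

	ifr->ifr_data = (void *)&cnt;
	if (ioctl(fd, SIOCETHTOOL, ifr))
		return NULL;

	/* rule_locs[] is a flexible array after the fixed part */
	list = calloc(1, sizeof(*list) + cnt.rule_cnt * sizeof(__u32));
	if (!list)
		return NULL;

	list->cmd = ETHTOOL_GRXCLSRLALL;
	list->rule_cnt = cnt.rule_cnt;
	ifr->ifr_data = (void *)list;
	if (ioctl(fd, SIOCETHTOOL, ifr)) {
		free(list);
		return NULL;
	}

	return list;	/* rule_locs[0..rule_cnt-1] hold the filter IDs */
}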
*/ - if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES) + if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs) return -EINVAL; + if (num_req == adapter->num_active_queues) + return 0; + if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) return -EINVAL; adapter->num_req_queues = num_req; - return iavf_request_queues(adapter, num_req); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_schedule_reset(adapter); + + /* wait for the reset is done */ + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { + msleep(IAVF_RESET_WAIT_MS); + if (adapter->flags & IAVF_FLAG_RESET_PENDING) + continue; + break; + } + if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->num_active_queues = num_req; + return -EOPNOTSUPP; + } + + return 0; } /** @@ -952,14 +1913,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - - memcpy(key, adapter->rss_key, adapter->rss_key_size); + if (key) + memcpy(key, adapter->rss_key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; + if (indir) + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; return 0; } @@ -971,7 +1931,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @key: hash key * @hfunc: hash function to use * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. **/ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, @@ -980,24 +1940,28 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Only support toeplitz hash function */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!indir) + + if (!key && !indir) return 0; if (key) memcpy(adapter->rss_key, key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); + } return iavf_config_rss(adapter); } static const struct ethtool_ops iavf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = iavf_get_ringparam, @@ -1013,6 +1977,7 @@ static const struct ethtool_ops iavf_ethtool_ops = { .set_coalesce = iavf_set_coalesce, .get_per_queue_coalesce = iavf_get_per_queue_coalesce, .set_per_queue_coalesce = iavf_set_per_queue_coalesce, + .set_rxnfc = iavf_set_rxnfc, .get_rxnfc = iavf_get_rxnfc, .get_rxfh_indir_size = iavf_get_rxfh_indir_size, .get_rxfh = iavf_get_rxfh, diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c new file mode 100644 index 000000000000..6146203efd84 --- /dev/null +++ 
b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +/* flow director ethtool support for iavf */ + +#include "iavf.h" + +#define GTPU_PORT 2152 +#define NAT_T_ESP_PORT 4500 +#define PFCP_PORT 8805 + +static const struct in6_addr ipv6_addr_full_mask = { + .in6_u = { + .u6_addr8 = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } + } +}; + +/** + * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload + * @fltr: Flow Director filter data structure + */ +static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr) +{ + return sizeof(struct ethhdr) + + (fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct udphdr); +} + +/** + * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the GTP-U protocol header is set successfully + */ +static int +iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */ + u16 adj_offs, hdr_offs; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_GTPU_HDR_TEID_OFFS0 4 +#define IAVF_GTPU_HDR_TEID_OFFS1 6 +#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10 +#define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK 0x00FF /* skip N_PDU */ +/* PDU Session Container Extension Header (PSC) */ +#define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85 +#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13 +#define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK 0x3F /* skip Type */ +#define IAVF_GTPU_EH_QFI_IDX 1 + + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_GTPU_HDR_TEID_OFFS0: + case IAVF_GTPU_HDR_TEID_OFFS1: { + __be16 *pay_word = (__be16 *)ghdr->buffer; + + pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID); + } + break; + case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS: + if ((fltr->flex_words[i].word & + IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) != + IAVF_GTPU_PSC_EXTHDR_TYPE) + return -EOPNOTSUPP; + if (!ehdr) + ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH); + break; + case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS: + if (!ehdr) + return -EINVAL; + ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] = + fltr->flex_words[i].word & + IAVF_GTPU_HDR_PSC_PDU_QFI_MASK; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI); + break; + default: + return -EINVAL; + } + } + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the PFCP protocol header is set successfully + */ +static int +iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct 
virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0 + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS: + hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD); + break; + default: + return -EINVAL; + } + } + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the NAT-T-ESP protocol header is set successfully + */ +static int +iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + u32 spi = 0; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_NAT_T_ESP_SPI_OFFS0 0 +#define IAVF_NAT_T_ESP_SPI_OFFS1 2 + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_NAT_T_ESP_SPI_OFFS0: + spi |= fltr->flex_words[i].word << 16; + break; + case IAVF_NAT_T_ESP_SPI_OFFS1: + spi |= fltr->flex_words[i].word; + break; + default: + return -EINVAL; + } + } + + if (!spi) + return -EOPNOTSUPP; /* Not support IKE Header Format with SPI 0 */ + + *(__be32 *)hdr->buffer = htonl(spi); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the UDP payload defined protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + int err; + + switch (ntohs(fltr->ip_data.dst_port)) { + case GTPU_PORT: + err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs); + break; + case NAT_T_ESP_PORT: + err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs); + break; + case PFCP_PORT: + err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/** + * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the IPv4 protocol header is set successfully + */ +static int +iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct iphdr *iph = (struct iphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); + + if (fltr->ip_mask.tos == U8_MAX) { + iph->tos = fltr->ip_data.tos; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP); + } + + if 
(fltr->ip_mask.proto == U8_MAX) { + iph->protocol = fltr->ip_data.proto; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT); + } + + if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) { + iph->saddr = fltr->ip_data.v4_addrs.src_ip; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC); + } + + if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) { + iph->daddr = fltr->ip_data.v4_addrs.dst_ip; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); + } + + fltr->ip_ver = 4; + + return 0; +} + +/** + * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the IPv6 protocol header is set successfully + */ +static int +iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); + + if (fltr->ip_mask.tclass == U8_MAX) { + iph->priority = (fltr->ip_data.tclass >> 4) & 0xF; + iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC); + } + + if (fltr->ip_mask.proto == U8_MAX) { + iph->nexthdr = fltr->ip_data.proto; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); + } + + fltr->ip_ver = 6; + + return 0; +} + +/** + * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the TCP protocol header is set successfully + */ +static int +iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct tcphdr *tcph = (struct tcphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + tcph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + tcph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT); + } + + return 0; +} + +/** + * iavf_fill_fdir_udp_hdr - fill the UDP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the UDP protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct udphdr *udph = (struct udphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + udph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + udph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
UDP, DST_PORT); + } + + if (!fltr->flex_cnt) + return 0; + + return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs); +} + +/** + * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the SCTP protocol header is set successfully + */ +static int +iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct sctphdr *sctph = (struct sctphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + sctph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + sctph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT); + } + + return 0; +} + +/** + * iavf_fill_fdir_ah_hdr - fill the AH protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the AH protocol header is set successfully + */ +static int +iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH); + + if (fltr->ip_mask.spi == htonl(U32_MAX)) { + ah->spi = fltr->ip_data.spi; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI); + } + + return 0; +} + +/** + * iavf_fill_fdir_esp_hdr - fill the ESP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the ESP protocol header is set successfully + */ +static int +iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); + + if (fltr->ip_mask.spi == htonl(U32_MAX)) { + esph->spi = fltr->ip_data.spi; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); + } + + return 0; +} + +/** + * iavf_fill_fdir_l4_hdr - fill the L4 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the L4 protocol header is set successfully + */ +static int +iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr; + __be32 *l4_4_data; + + if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */ + return 0; + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + l4_4_data = (__be32 *)hdr->buffer; + + /* L2TPv3 over IP with 'Session ID' */ + if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) { + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID); + + *l4_4_data = fltr->ip_data.l4_header; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the Ethernet protocol header is set successfully + */ +static int +iavf_fill_fdir_eth_hdr(struct 
iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH); + + if (fltr->eth_mask.etype == htons(U16_MAX)) { + if (fltr->eth_data.etype == htons(ETH_P_IP) || + fltr->eth_data.etype == htons(ETH_P_IPV6)) + return -EOPNOTSUPP; + + ehdr->h_proto = fltr->eth_data.etype; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE); + } + + return 0; +} + +/** + * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns 0 if the add Flow Director virtchnl message is filled successfully + */ +int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg; + struct virtchnl_proto_hdrs *proto_hdrs; + int err; + + proto_hdrs = &vc_msg->rule_cfg.proto_hdrs; + + err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */ + if (err) + return err; + + switch (fltr->flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_UDP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_SCTP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_AH: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_ESP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_OTHER: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_TCP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_UDP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_SCTP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_AH: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_ESP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_OTHER: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_NON_IP_L2: + break; + default: + err = -EINVAL; + break; + } + + if (err) + return err; + + vc_msg->vsi_id = adapter->vsi.id; + vc_msg->rule_cfg.action_set.count = 1; + vc_msg->rule_cfg.action_set.actions[0].type = fltr->action; + vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index; + + return 0; +} + +/** + * iavf_fdir_flow_proto_name - get the flow protocol name + * @flow_type: Flow Director filter flow type + **/ +static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type) +{ + switch (flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + case IAVF_FDIR_FLOW_IPV6_TCP: + return "TCP"; + case IAVF_FDIR_FLOW_IPV4_UDP: + case IAVF_FDIR_FLOW_IPV6_UDP: + return "UDP"; + case 
IAVF_FDIR_FLOW_IPV4_SCTP: + case IAVF_FDIR_FLOW_IPV6_SCTP: + return "SCTP"; + case IAVF_FDIR_FLOW_IPV4_AH: + case IAVF_FDIR_FLOW_IPV6_AH: + return "AH"; + case IAVF_FDIR_FLOW_IPV4_ESP: + case IAVF_FDIR_FLOW_IPV6_ESP: + return "ESP"; + case IAVF_FDIR_FLOW_IPV4_OTHER: + case IAVF_FDIR_FLOW_IPV6_OTHER: + return "Other"; + case IAVF_FDIR_FLOW_NON_IP_L2: + return "Ethernet"; + default: + return NULL; + } +} + +/** + * iavf_print_fdir_fltr + * @adapter: adapter structure + * @fltr: Flow Director filter to print + * + * Print the Flow Director filter + **/ +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type); + + if (!proto) + return; + + switch (fltr->flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + case IAVF_FDIR_FLOW_IPV4_UDP: + case IAVF_FDIR_FLOW_IPV4_SCTP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case IAVF_FDIR_FLOW_IPV4_AH: + case IAVF_FDIR_FLOW_IPV4_ESP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case IAVF_FDIR_FLOW_IPV4_OTHER: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; + case IAVF_FDIR_FLOW_IPV6_TCP: + case IAVF_FDIR_FLOW_IPV6_UDP: + case IAVF_FDIR_FLOW_IPV6_SCTP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case IAVF_FDIR_FLOW_IPV6_AH: + case IAVF_FDIR_FLOW_IPV6_ESP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case IAVF_FDIR_FLOW_IPV6_OTHER: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; + case IAVF_FDIR_FLOW_NON_IP_L2: + dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n", + fltr->loc, + ntohs(fltr->eth_data.etype)); + break; + default: + break; + } +} + +/** + * iavf_fdir_is_dup_fltr - test if filter is already in list + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns true if the filter is found in the list + */ +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *tmp; + + list_for_each_entry(tmp, &adapter->fdir_list_head, list) { + if (tmp->flow_type != fltr->flow_type) + continue; + + if (!memcmp(&tmp->eth_data, &fltr->eth_data, + sizeof(fltr->eth_data)) && + !memcmp(&tmp->ip_data, &fltr->ip_data, + sizeof(fltr->ip_data)) && + !memcmp(&tmp->ext_data, &fltr->ext_data, + sizeof(fltr->ext_data))) + return true; + } + + return false; +} + +/** + * iavf_find_fdir_fltr_by_loc - 
find filter with location + * @adapter: pointer to the VF adapter structure + * @loc: location to find. + * + * Returns pointer to Flow Director filter if found or null + */ +struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc) +{ + struct iavf_fdir_fltr *rule; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) + if (rule->loc == loc) + return rule; + + return NULL; +} + +/** + * iavf_fdir_list_add_fltr - add a new node to the flow director filter list + * @adapter: pointer to the VF adapter structure + * @fltr: filter node to add to structure + */ +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *rule, *parent = NULL; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) { + if (rule->loc >= fltr->loc) + break; + parent = rule; + } + + if (parent) + list_add(&fltr->list, &parent->list); + else + list_add(&fltr->list, &adapter->fdir_list_head); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h new file mode 100644 index 000000000000..33c55c366315 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. */ + +#ifndef _IAVF_FDIR_H_ +#define _IAVF_FDIR_H_ + +struct iavf_adapter; + +/* State of Flow Director filter */ +enum iavf_fdir_fltr_state_t { + IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */ + IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */ + IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */ + IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */ + IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */ +}; + +enum iavf_fdir_flow_type { + /* NONE - used for undef/error */ + IAVF_FDIR_FLOW_NONE = 0, + IAVF_FDIR_FLOW_IPV4_TCP, + IAVF_FDIR_FLOW_IPV4_UDP, + IAVF_FDIR_FLOW_IPV4_SCTP, + IAVF_FDIR_FLOW_IPV4_AH, + IAVF_FDIR_FLOW_IPV4_ESP, + IAVF_FDIR_FLOW_IPV4_OTHER, + IAVF_FDIR_FLOW_IPV6_TCP, + IAVF_FDIR_FLOW_IPV6_UDP, + IAVF_FDIR_FLOW_IPV6_SCTP, + IAVF_FDIR_FLOW_IPV6_AH, + IAVF_FDIR_FLOW_IPV6_ESP, + IAVF_FDIR_FLOW_IPV6_OTHER, + IAVF_FDIR_FLOW_NON_IP_L2, + /* MAX - this must be last and add anything new just above it */ + IAVF_FDIR_FLOW_PTYPE_MAX, +}; + +/* Must not exceed the array element number of '__be32 data[2]' in the ethtool + * 'struct ethtool_rx_flow_spec.m_ext.data[2]' to express the flex-byte (word). 
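iavf_fdir_list_add_fltr() keeps fdir_list_head sorted in ascending loc order by inserting after the last node with a smaller location, and iavf_find_fdir_fltr_by_loc() walks the same list; neither helper takes a lock, so callers must hold fdir_fltr_lock as the ethtool paths above do. A condensed sketch of that calling pattern (iavf_fdir_insert_sketch() is illustrative, not in the patch):

/* Lookup and insert under fdir_fltr_lock so the loc-sorted list can
 * never be observed half-updated.
 */
static int iavf_fdir_insert_sketch(struct iavf_adapter *adapter,
				   struct iavf_fdir_fltr *fltr)
{
	int err = 0;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	if (iavf_find_fdir_fltr_by_loc(adapter, fltr->loc)) {
		err = -EEXIST;	/* location already taken */
	} else {
		iavf_fdir_list_add_fltr(adapter, fltr);
		adapter->fdir_active_fltr++;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	return err;
}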
+ */ +#define IAVF_FLEX_WORD_NUM 2 + +struct iavf_flex_word { + u16 offset; + u16 word; +}; + +struct iavf_ipv4_addrs { + __be32 src_ip; + __be32 dst_ip; +}; + +struct iavf_ipv6_addrs { + struct in6_addr src_ip; + struct in6_addr dst_ip; +}; + +struct iavf_fdir_eth { + __be16 etype; +}; + +struct iavf_fdir_ip { + union { + struct iavf_ipv4_addrs v4_addrs; + struct iavf_ipv6_addrs v6_addrs; + }; + __be16 src_port; + __be16 dst_port; + __be32 l4_header; /* first 4 bytes of the layer 4 header */ + __be32 spi; /* security parameter index for AH/ESP */ + union { + u8 tos; + u8 tclass; + }; + u8 proto; +}; + +struct iavf_fdir_extra { + u32 usr_def[IAVF_FLEX_WORD_NUM]; +}; + +/* bookkeeping of Flow Director filters */ +struct iavf_fdir_fltr { + enum iavf_fdir_fltr_state_t state; + struct list_head list; + + enum iavf_fdir_flow_type flow_type; + + struct iavf_fdir_eth eth_data; + struct iavf_fdir_eth eth_mask; + + struct iavf_fdir_ip ip_data; + struct iavf_fdir_ip ip_mask; + + struct iavf_fdir_extra ext_data; + struct iavf_fdir_extra ext_mask; + + enum virtchnl_action action; + + /* flex byte filter data */ + u8 ip_ver; /* used to adjust the flex offset, 4 : IPv4, 6 : IPv6 */ + u8 flex_cnt; + struct iavf_flex_word flex_words[IAVF_FLEX_WORD_NUM]; + + u32 flow_id; + + u32 loc; /* Rule location inside the flow table */ + u32 q_index; + + struct virtchnl_fdir_add vc_add_msg; +}; + +int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc); +#endif /* _IAVF_FDIR_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 62fe56ddcb6e..3fc572341781 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -14,23 +14,13 @@ static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); static int iavf_close(struct net_device *netdev); -static int iavf_init_get_resources(struct iavf_adapter *adapter); +static void iavf_init_get_resources(struct iavf_adapter *adapter); static int iavf_check_reset_complete(struct iavf_hw *hw); char iavf_driver_name[] = "iavf"; static const char iavf_driver_string[] = "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; -#define DRV_KERN "-k" - -#define DRV_VERSION_MAJOR 3 -#define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 3 -#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ - __stringify(DRV_VERSION_MINOR) "." 
\ - __stringify(DRV_VERSION_BUILD) \ - DRV_KERN -const char iavf_driver_version[] = DRV_VERSION; static const char iavf_copyright[] = "Copyright (c) 2013 - 2018 Intel Corporation."; @@ -57,11 +47,126 @@ MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); static const struct net_device_ops iavf_netdev_ops; struct workqueue_struct *iavf_wq; +int iavf_status_to_errno(enum iavf_status status) +{ + switch (status) { + case IAVF_SUCCESS: + return 0; + case IAVF_ERR_PARAM: + case IAVF_ERR_MAC_TYPE: + case IAVF_ERR_INVALID_MAC_ADDR: + case IAVF_ERR_INVALID_LINK_SETTINGS: + case IAVF_ERR_INVALID_PD_ID: + case IAVF_ERR_INVALID_QP_ID: + case IAVF_ERR_INVALID_CQ_ID: + case IAVF_ERR_INVALID_CEQ_ID: + case IAVF_ERR_INVALID_AEQ_ID: + case IAVF_ERR_INVALID_SIZE: + case IAVF_ERR_INVALID_ARP_INDEX: + case IAVF_ERR_INVALID_FPM_FUNC_ID: + case IAVF_ERR_QP_INVALID_MSG_SIZE: + case IAVF_ERR_INVALID_FRAG_COUNT: + case IAVF_ERR_INVALID_ALIGNMENT: + case IAVF_ERR_INVALID_PUSH_PAGE_INDEX: + case IAVF_ERR_INVALID_IMM_DATA_SIZE: + case IAVF_ERR_INVALID_VF_ID: + case IAVF_ERR_INVALID_HMCFN_ID: + case IAVF_ERR_INVALID_PBLE_INDEX: + case IAVF_ERR_INVALID_SD_INDEX: + case IAVF_ERR_INVALID_PAGE_DESC_INDEX: + case IAVF_ERR_INVALID_SD_TYPE: + case IAVF_ERR_INVALID_HMC_OBJ_INDEX: + case IAVF_ERR_INVALID_HMC_OBJ_COUNT: + case IAVF_ERR_INVALID_SRQ_ARM_LIMIT: + return -EINVAL; + case IAVF_ERR_NVM: + case IAVF_ERR_NVM_CHECKSUM: + case IAVF_ERR_PHY: + case IAVF_ERR_CONFIG: + case IAVF_ERR_UNKNOWN_PHY: + case IAVF_ERR_LINK_SETUP: + case IAVF_ERR_ADAPTER_STOPPED: + case IAVF_ERR_PRIMARY_REQUESTS_PENDING: + case IAVF_ERR_AUTONEG_NOT_COMPLETE: + case IAVF_ERR_RESET_FAILED: + case IAVF_ERR_BAD_PTR: + case IAVF_ERR_SWFW_SYNC: + case IAVF_ERR_QP_TOOMANY_WRS_POSTED: + case IAVF_ERR_QUEUE_EMPTY: + case IAVF_ERR_FLUSHED_QUEUE: + case IAVF_ERR_OPCODE_MISMATCH: + case IAVF_ERR_CQP_COMPL_ERROR: + case IAVF_ERR_BACKING_PAGE_ERROR: + case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE: + case IAVF_ERR_MEMCPY_FAILED: + case IAVF_ERR_SRQ_ENABLED: + case IAVF_ERR_ADMIN_QUEUE_ERROR: + case IAVF_ERR_ADMIN_QUEUE_FULL: + case IAVF_ERR_BAD_IWARP_CQE: + case IAVF_ERR_NVM_BLANK_MODE: + case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: + case IAVF_ERR_DIAG_TEST_FAILED: + case IAVF_ERR_FIRMWARE_API_VERSION: + case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return -EIO; + case IAVF_ERR_DEVICE_NOT_SUPPORTED: + return -ENODEV; + case IAVF_ERR_NO_AVAILABLE_VSI: + case IAVF_ERR_RING_FULL: + return -ENOSPC; + case IAVF_ERR_NO_MEMORY: + return -ENOMEM; + case IAVF_ERR_TIMEOUT: + case IAVF_ERR_ADMIN_QUEUE_TIMEOUT: + return -ETIMEDOUT; + case IAVF_ERR_NOT_IMPLEMENTED: + case IAVF_NOT_SUPPORTED: + return -EOPNOTSUPP; + case IAVF_ERR_ADMIN_QUEUE_NO_WORK: + return -EALREADY; + case IAVF_ERR_NOT_READY: + return -EBUSY; + case IAVF_ERR_BUF_TOO_SHORT: + return -EMSGSIZE; + } + + return -EIO; +} + +int virtchnl_status_to_errno(enum virtchnl_status_code v_status) +{ + switch (v_status) { + case VIRTCHNL_STATUS_SUCCESS: + return 0; + case VIRTCHNL_STATUS_ERR_PARAM: + case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: + return -EINVAL; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return -ENOMEM; + case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: + case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return -EIO; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return -EOPNOTSUPP; + } + + return -EIO; +} + +/** + * 
iavf_pdev_to_adapter - go from pci_dev to adapter + * @pdev: pci_dev pointer + */ +static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev) +{ + return netdev_priv(pci_get_drvdata(pdev)); +} + /** * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -143,6 +248,27 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, } /** + * iavf_lock_timeout - try to lock mutex but give up after timeout + * @lock: mutex that should be locked + * @msecs: timeout in msecs + * + * Returns 0 on success, negative on failure + **/ +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +{ + unsigned int wait, delay = 10; + + for (wait = 0; wait < msecs; wait += delay) { + if (mutex_trylock(lock)) + return 0; + + msleep(delay); + } + + return -1; +} + +/** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ @@ -156,8 +282,22 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) } /** + * iavf_schedule_request_stats - Set the flags and schedule statistics request + * @adapter: board private structure + * + * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly + * request and refresh ethtool stats + **/ +void iavf_schedule_request_stats(struct iavf_adapter *adapter) +{ + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +} + +/** * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure + * @txqueue: queue number that is timing out **/ static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue) { @@ -269,8 +409,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) rd32(hw, IAVF_VFINT_ICR01); rd32(hw, IAVF_VFINT_ICR0_ENA1); - /* schedule work on the private workqueue */ - queue_work(iavf_wq, &adapter->adminq_task); + if (adapter->state != __IAVF_REMOVE) + /* schedule work on the private workqueue */ + queue_work(iavf_wq, &adapter->adminq_task); return IRQ_HANDLED; } @@ -430,14 +571,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%u", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%u", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%u", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; @@ -459,10 +600,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread the IRQ affinity hints across online CPUs. Note that * get_cpu_mask returns a mask with a permanent lifetime so - * it's safe to use as a hint for irq_set_affinity_hint. + * it's safe to use as a hint for irq_update_affinity_hint. 
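Note that iavf_lock_timeout() above returns 0 on success and -1 (not an errno) on timeout, so callers should test for nonzero rather than a specific code. An illustrative call site, not lifted verbatim from this patch:

/* Bound the wait for crit_lock instead of open-coding a trylock loop
 * as the ethtool paths above still do.
 */
static void iavf_crit_section_sketch(struct iavf_adapter *adapter)
{
	if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
		dev_warn(&adapter->pdev->dev,
			 "failed to acquire crit_lock in %s\n", __func__);
		return;
	}

	/* ... work that requires crit_lock ... */

	mutex_unlock(&adapter->crit_lock);
}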
*/ cpu = cpumask_local_spread(q_vector->v_idx, -1); - irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + irq_update_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; @@ -472,7 +613,7 @@ free_queue_irqs: vector--; irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &adapter->q_vectors[vector]); } return err; @@ -524,7 +665,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) for (vector = 0; vector < q_vectors; vector++) { irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &adapter->q_vectors[vector]); } } @@ -613,14 +754,17 @@ static void iavf_configure_rx(struct iavf_adapter *adapter) * mac_vlan_list_lock. **/ static struct -iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, + struct iavf_vlan vlan) { struct iavf_vlan_filter *f; list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (vlan == f->vlan) + if (f->vlan.vid == vlan.vid && + f->vlan.tpid == vlan.tpid) return f; } + return NULL; } @@ -632,7 +776,8 @@ iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) * Returns ptr to the filter object or NULL when no memory available. **/ static struct -iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, + struct iavf_vlan vlan) { struct iavf_vlan_filter *f = NULL; @@ -661,7 +806,7 @@ clearout: * @adapter: board private structure * @vlan: VLAN tag **/ -static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) +static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) { struct iavf_vlan_filter *f; @@ -677,6 +822,68 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) } /** + * iavf_restore_filters + * @adapter: board private structure + * + * Restore existing non MAC filters when VF netdev comes back up + **/ +static void iavf_restore_filters(struct iavf_adapter *adapter) +{ + u16 vid; + + /* re-add all VLAN filters */ + for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID) + iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q)); + + for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID) + iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD)); +} + +/** + * iavf_get_num_vlans_added - get number of VLANs added + * @adapter: board private structure + */ +u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) +{ + return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) + + bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID); +} + +/** + * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF + * @adapter: board private structure + * + * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN, + * do not impose a limit as that maintains current behavior and for + * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF. 
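The per-TPID bitmaps (active_cvlans/active_svlans) let iavf_restore_filters() rebuild the VLAN filter list after a reset without help from the stack, and bitmap_weight() over both gives the count checked against the negotiated maximum. A sketch of the bookkeeping that pairs with the clear_bit() calls in iavf_vlan_rx_kill_vid(); iavf_track_vlan() and the exact point where the bits are set are assumptions, since the set side is not visible in this hunk:

/* Assumed counterpart of the kill path: record the VID in the
 * TPID-specific bitmap so iavf_restore_filters() can replay it.
 */
static void iavf_track_vlan(struct iavf_adapter *adapter, u16 vid, u16 tpid)
{
	if (tpid == ETH_P_8021Q)
		set_bit(vid, adapter->vsi.active_cvlans);
	else
		set_bit(vid, adapter->vsi.active_svlans);
}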
+ **/ +static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter) +{ + /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has + * never been a limit on the VF driver side + */ + if (VLAN_ALLOWED(adapter)) + return VLAN_N_VID; + else if (VLAN_V2_ALLOWED(adapter)) + return adapter->vlan_v2_caps.filtering.max_filters; + + return 0; +} + +/** + * iavf_max_vlans_added - check if maximum VLANs allowed already exist + * @adapter: board private structure + **/ +static bool iavf_max_vlans_added(struct iavf_adapter *adapter) +{ + if (iavf_get_num_vlans_added(adapter) < + iavf_get_max_vlans_allowed(adapter)) + return false; + + return true; +} + +/** * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data @@ -687,10 +894,18 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!VLAN_ALLOWED(adapter)) + if (!VLAN_FILTERING_ALLOWED(adapter)) + return -EIO; + + if (iavf_max_vlans_added(adapter)) { + netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n", + iavf_get_max_vlans_allowed(adapter)); return -EIO; - if (iavf_add_vlan(adapter, vid) == NULL) + } + + if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)))) return -ENOMEM; + return 0; } @@ -705,11 +920,13 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (VLAN_ALLOWED(adapter)) { - iavf_del_vlan(adapter, vid); - return 0; - } - return -EIO; + iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))); + if (proto == cpu_to_be16(ETH_P_8021Q)) + clear_bit(vid, adapter->vsi.active_cvlans); + else + clear_bit(vid, adapter->vsi.active_svlans); + + return 0; } /** @@ -761,6 +978,9 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; + f->add_handled = false; + f->is_new_mac = true; + f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; @@ -770,42 +990,129 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, } /** - * iavf_set_mac - NDO callback to set port mac address - * @netdev: network interface device structure - * @p: pointer to an address structure + * iavf_replace_primary_mac - Replace current primary address + * @adapter: board private structure + * @new_mac: new MAC address to be applied * - * Returns 0 on success, negative on failure + * Replace current dev_addr and send request to PF for removal of previous + * primary MAC address filter and addition of new primary MAC filter. + * Return 0 for success, -ENOMEM for failure. + * + * Do not call this with mac_vlan_list_lock! 
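[Illustration, not part of the patch] The add-VID path above now refuses new filters once the negotiated cap is hit, instead of silently overflowing what the PF will accept. A hedged userspace sketch of that guard (the real limit comes from vlan_v2_caps over virtchnl; the numbers here are made up):

#include <errno.h>
#include <stdio.h>

static unsigned int vlans_added;
static unsigned int max_vlans = 8;	/* illustrative PF-provided cap */

static int add_vid_guarded(unsigned short vid)
{
	if (vlans_added >= max_vlans) {
		fprintf(stderr, "Max allowed VLAN filters %u reached\n",
			max_vlans);
		return -EIO;	/* same errno the ndo callback returns */
	}
	vlans_added++;
	printf("added VID %u (%u/%u)\n", vid, vlans_added, max_vlans);
	return 0;
}

int main(void)
{
	for (unsigned short vid = 1; vid <= 9; vid++)
		if (add_vid_guarded(vid))
			break;
	return 0;
}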
**/ -static int iavf_set_mac(struct net_device *netdev, void *p) +int iavf_replace_primary_mac(struct iavf_adapter *adapter, + const u8 *new_mac) { - struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) - return 0; spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->is_primary = false; + } + f = iavf_find_filter(adapter, hw->mac.addr); if (f) { f->remove = true; adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } - f = iavf_add_filter(adapter, addr->sa_data); + f = iavf_add_filter(adapter, new_mac); + + if (f) { + /* Always send the request to add if changing primary MAC + * even if filter is already present on the list + */ + f->is_primary = true; + f->add = true; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; + ether_addr_copy(hw->mac.addr, new_mac); + } spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* schedule the watchdog task to immediately process the request */ if (f) { - ether_addr_copy(hw->mac.addr, addr->sa_data); + queue_work(iavf_wq, &adapter->watchdog_task.work); + return 0; } + return -ENOMEM; +} + +/** + * iavf_is_mac_set_handled - wait for a response to set MAC from PF + * @netdev: network interface device structure + * @macaddr: MAC address to set + * + * Returns true on success, false on failure + */ +static bool iavf_is_mac_set_handled(struct net_device *netdev, + const u8 *macaddr) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_mac_filter *f; + bool ret = false; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = iavf_find_filter(adapter, macaddr); + + if (!f || (!f->add && f->add_handled)) + ret = true; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + return ret; +} + +/** + * iavf_set_mac - NDO callback to set port MAC address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int iavf_set_mac(struct net_device *netdev, void *p) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + int ret; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = iavf_replace_primary_mac(adapter, addr->sa_data); + + if (ret) + return ret; + + /* If this is an initial set MAC during VF spawn do not wait */ + if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) { + adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET; + return 0; + } + + ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, + iavf_is_mac_set_handled(netdev, addr->sa_data), + msecs_to_jiffies(2500)); + + /* If ret < 0 then it means wait was interrupted. + * If ret == 0 then it means we got a timeout. + * else it means we got response for set MAC from PF, + * check if netdev MAC was updated to requested MAC, + * if yes then set MAC succeeded otherwise it failed return -EACCES + */ + if (ret < 0) + return ret; + + if (!ret) + return -EAGAIN; - return (f == NULL) ? 
-ENOMEM : 0; + if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) + return -EACCES; + + return 0; } /** @@ -948,7 +1255,7 @@ static void iavf_configure(struct iavf_adapter *adapter) **/ static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __IAVF_RUNNING; + iavf_change_state(adapter, __IAVF_RUNNING); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_napi_enable_all(adapter); @@ -960,63 +1267,156 @@ static void iavf_up_complete(struct iavf_adapter *adapter) } /** - * iavf_down - Shutdown the connection processing + * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF + * yet and mark others to be removed. * @adapter: board private structure - * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -void iavf_down(struct iavf_adapter *adapter) +static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - struct iavf_vlan_filter *vlf; - struct iavf_mac_filter *f; - struct iavf_cloud_filter *cf; - - if (adapter->state <= __IAVF_DOWN_PENDING) - return; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - adapter->link_up = false; - iavf_napi_disable_all(adapter); - iavf_irq_disable(adapter); + struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_mac_filter *f, *ftmp; spin_lock_bh(&adapter->mac_vlan_list_lock); - /* clear the sync flag on all filters */ __dev_uc_unsync(adapter->netdev, NULL); __dev_mc_unsync(adapter->netdev, NULL); /* remove all MAC filters */ - list_for_each_entry(f, &adapter->mac_filter_list, list) { - f->remove = true; + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, + list) { + if (f->add) { + list_del(&f->list); + kfree(f); + } else { + f->remove = true; + } } /* remove all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->remove = true; + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, + list) { + if (vlf->add) { + list_del(&vlf->list); + kfree(vlf); + } else { + vlf->remove = true; + } } - spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and + * mark others to be removed. + * @adapter: board private structure + **/ +static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) +{ + struct iavf_cloud_filter *cf, *cftmp; /* remove all cloud filters */ spin_lock_bh(&adapter->cloud_filter_list_lock); - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - cf->del = true; + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, + list) { + if (cf->add) { + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } else { + cf->del = true; + } } spin_unlock_bh(&adapter->cloud_filter_list_lock); +} - if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __IAVF_RESETTING) { +/** + * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark + * others to be removed.
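[Illustration, not part of the patch] All four iavf_clear_*() helpers in this hunk share one two-phase teardown rule: an entry the PF never saw (still flagged for ADD) can be freed locally, while an entry the PF already knows about must only be marked, leaving the watchdog to send the virtchnl delete. A minimal userspace sketch of that rule on a plain singly linked list (types and names are made up):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct filter {
	struct filter *next;
	int id;
	bool add;	/* queued for ADD, never sent to the PF */
	bool remove;	/* PF has it; must be deleted via a message */
};

static void clear_filters(struct filter **head)
{
	struct filter **pp = head;

	while (*pp) {
		struct filter *f = *pp;

		if (f->add) {		/* PF never saw it: free locally */
			*pp = f->next;
			free(f);
		} else {		/* PF has it: mark for later DEL */
			f->remove = true;
			pp = &f->next;
		}
	}
}

int main(void)
{
	struct filter *head = NULL;

	for (int i = 0; i < 4; i++) {
		struct filter *f = calloc(1, sizeof(*f));

		if (!f)
			break;
		f->id = i;
		f->add = (i % 2 == 0);
		f->next = head;
		head = f;
	}
	clear_filters(&head);
	for (struct filter *f = head; f; f = f->next)
		printf("id %d remove=%d\n", f->id, f->remove);
	return 0;
}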
+ * @adapter: board private structure + **/ +static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir, *fdirtmp; + + /* remove all Flow Director filters */ + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); +} + +/** + * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark + * others to be removed. + * @adapter: board private structure + **/ +static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter) +{ + struct iavf_adv_rss *rss, *rsstmp; + + /* remove all advanced RSS configuration */ + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, + list) { + if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { + list_del(&rss->list); + kfree(rss); + } else { + rss->state = IAVF_ADV_RSS_DEL_REQUEST; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); +} + +/** + * iavf_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding crit_lock. + **/ +void iavf_down(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->state <= __IAVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); + + iavf_clear_mac_vlan_filters(adapter); + iavf_clear_cloud_filters(adapter); + iavf_clear_fdir_filters(adapter); + iavf_clear_adv_rss_conf(adapter); + + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait * here for this to complete. The watchdog is still running * and it will take care of this. */ - adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + if (!list_empty(&adapter->mac_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; + if (!list_empty(&adapter->vlan_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; + if (!list_empty(&adapter->cloud_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + if (!list_empty(&adapter->fdir_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + if (!list_empty(&adapter->adv_rss_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } @@ -1084,6 +1484,86 @@ static void iavf_free_queues(struct iavf_adapter *adapter) } /** + * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload + * @adapter: board private structure + * + * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or + * stripped in certain descriptor fields. Instead of checking the offload + * capability bits in the hot path, cache the location in the ring-specific + * flags.
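[Illustration, not part of the patch] The idea the comment above describes: derive the descriptor location once from the negotiated capabilities, store it as a per-ring flag bit, and let the per-packet path do a single bit test. A hedged sketch of that compute-once/test-often split (flag names here are invented, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_FLAG_VLAN_IN_L2TAG1 (1u << 0)
#define RING_FLAG_VLAN_IN_L2TAG2 (1u << 1)

struct ring { uint32_t flags; };

/* slow path: decide once, after capability negotiation */
static void cache_tag_location(struct ring *r, bool outer_supported)
{
	r->flags &= ~(RING_FLAG_VLAN_IN_L2TAG1 | RING_FLAG_VLAN_IN_L2TAG2);
	r->flags |= outer_supported ? RING_FLAG_VLAN_IN_L2TAG2
				    : RING_FLAG_VLAN_IN_L2TAG1;
}

/* hot path: one bit test per descriptor, no capability parsing */
static const char *tag_field(const struct ring *r)
{
	return (r->flags & RING_FLAG_VLAN_IN_L2TAG2) ? "L2TAG2" : "L2TAG1";
}

int main(void)
{
	struct ring rx = { 0 };

	cache_tag_location(&rx, true);
	printf("VLAN tag read from %s\n", tag_field(&rx));
	return 0;
}

Clearing both bits before setting one is the same "prevent multiple L2TAG bits after VFR" precaution the real function takes.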
+ */ +void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct iavf_ring *tx_ring = &adapter->tx_rings[i]; + struct iavf_ring *rx_ring = &adapter->rx_rings[i]; + + /* prevent multiple L2TAG bits being set after VFR */ + tx_ring->flags &= + ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2); + rx_ring->flags &= + ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2); + + if (VLAN_ALLOWED(adapter)) { + tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_supported_caps *stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support; + + stripping_support = + &adapter->vlan_v2_caps.offloads.stripping_support; + insertion_support = + &adapter->vlan_v2_caps.offloads.insertion_support; + + if (stripping_support->outer) { + if (stripping_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + rx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (stripping_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) + rx_ring->flags |= + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; + } else if (stripping_support->inner) { + if (stripping_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + rx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (stripping_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) + rx_ring->flags |= + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; + } + + if (insertion_support->outer) { + if (insertion_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + tx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (insertion_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) + tx_ring->flags |= + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; + } else if (insertion_support->inner) { + if (insertion_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + tx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (insertion_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) + tx_ring->flags |= + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; + } + } + } +} + +/** * iavf_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * @@ -1144,6 +1624,8 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) adapter->num_active_queues = num_active_queues; + iavf_set_queue_vlan_tag_loc(adapter); + return 0; err_out: @@ -1207,7 +1689,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter) struct iavf_aqc_get_set_rss_key_data *rss_key = (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; struct iavf_hw *hw = &adapter->hw; - int ret = 0; + enum iavf_status status; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -1216,24 +1698,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter) return -EBUSY; } - ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); - if (ret) { + status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + if (status) { dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - iavf_stat_str(hw, ret), + iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); - return ret; + return iavf_status_to_errno(status); } - ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, - adapter->rss_lut, adapter->rss_lut_size); - if (ret) { + status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); + if 
(status) { dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", - iavf_stat_str(hw, ret), + iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); + return iavf_status_to_errno(status); } - return ret; + return 0; } @@ -1303,7 +1786,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter) static int iavf_init_rss(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; - int ret; if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ @@ -1319,9 +1801,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter) iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = iavf_config_rss(adapter); - return ret; + return iavf_config_rss(adapter); } /** @@ -1350,7 +1831,7 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); netif_napi_add(adapter->netdev, &q_vector->napi, - iavf_napi_poll, NAPI_POLL_WEIGHT); + iavf_napi_poll); } return 0; @@ -1499,11 +1980,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_map_rings_to_vectors(adapter); - - if (RSS_AQ(adapter)) - adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; - else - err = iavf_init_rss(adapter); err: return err; } @@ -1521,6 +1997,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) { if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) return iavf_send_vf_config_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS) + return iavf_send_vf_offload_vlan_v2_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { iavf_disable_queues(adapter); return 0; @@ -1606,8 +2084,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); return 0; } - - if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { iavf_set_promiscuous(adapter, 0); return 0; @@ -1639,38 +2116,178 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_add_cloud_filter(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { + iavf_add_fdir_filter(adapter); + return IAVF_SUCCESS; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { + iavf_del_fdir_filter(adapter); + return IAVF_SUCCESS; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { + iavf_add_adv_rss_cfg(adapter); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { + iavf_del_adv_rss_cfg(adapter); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) { + iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) { + iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) { + iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) { + iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) { + iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) { + 
iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) { + iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) { + iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD); + return 0; + } + + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { + iavf_request_stats(adapter); + return 0; + } + return -EAGAIN; } /** + * iavf_set_vlan_offload_features - set VLAN offload configuration + * @adapter: board private structure + * @prev_features: previous features used for comparison + * @features: updated features used for configuration + * + * Set the aq_required bit(s) based on the requested features passed in to + * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule + * the watchdog if any changes are requested to expedite the request via + * virtchnl. + **/ +void +iavf_set_vlan_offload_features(struct iavf_adapter *adapter, + netdev_features_t prev_features, + netdev_features_t features) +{ + bool enable_stripping = true, enable_insertion = true; + u16 vlan_ethertype = 0; + u64 aq_required = 0; + + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. + */ + if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) + vlan_ethertype = ETH_P_8021AD; + else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) + vlan_ethertype = ETH_P_8021Q; + else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) + vlan_ethertype = ETH_P_8021AD; + else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) + vlan_ethertype = ETH_P_8021Q; + else + vlan_ethertype = ETH_P_8021Q; + + if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) + enable_stripping = false; + if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) + enable_insertion = false; + + if (VLAN_ALLOWED(adapter)) { + /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN + * stripping via virtchnl. VLAN insertion can be toggled on the + * netdev, but it doesn't require a virtchnl message + */ + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + + } else if (VLAN_V2_ALLOWED(adapter)) { + switch (vlan_ethertype) { + case ETH_P_8021Q: + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; + + if (enable_insertion) + aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; + break; + case ETH_P_8021AD: + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; + + if (enable_insertion) + aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; + break; + } + } + + if (aq_required) { + adapter->aq_required |= aq_required; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + } +} + +/** * iavf_startup - first step of driver startup * @adapter: board private structure * * Function process __IAVF_STARTUP driver state. 
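[Illustration, not part of the patch] Before the init states below, it is worth seeing the shape of iavf_process_aq_command() in isolation: aq_required is a bitmask of pending admin-queue requests, each call sends at most one message, and -EAGAIN signals "nothing left to do" so the watchdog can back off. A minimal sketch (flag names invented; in the driver the handler itself clears its flag):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define AQ_ADD_FILTER (1u << 0)
#define AQ_DEL_FILTER (1u << 1)
#define AQ_REQ_STATS  (1u << 2)

static uint32_t aq_required;

/* one admin-queue message per call; caller repolls until -EAGAIN */
static int process_aq_command(void)
{
	if (aq_required & AQ_ADD_FILTER) {
		aq_required &= ~AQ_ADD_FILTER;
		puts("sent ADD_FILTER");
		return 0;
	}
	if (aq_required & AQ_DEL_FILTER) {
		aq_required &= ~AQ_DEL_FILTER;
		puts("sent DEL_FILTER");
		return 0;
	}
	if (aq_required & AQ_REQ_STATS) {
		aq_required &= ~AQ_REQ_STATS;
		puts("sent REQUEST_STATS");
		return 0;
	}
	return -EAGAIN;	/* nothing pending */
}

int main(void)
{
	aq_required = AQ_ADD_FILTER | AQ_REQ_STATS;
	while (process_aq_command() != -EAGAIN)
		;
	return 0;
}

One message per pass keeps a single virtchnl operation outstanding at a time, which is why every branch returns immediately after sending.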
* When success the state is changed to __IAVF_INIT_VERSION_CHECK - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_startup(struct iavf_adapter *adapter) +static void iavf_startup(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; - int err; + enum iavf_status status; + int ret; WARN_ON(adapter->state != __IAVF_STARTUP); /* driver loaded, probe complete */ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - err = iavf_set_mac_type(hw); - if (err) { - dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); + status = iavf_set_mac_type(hw); + if (status) { + dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status); goto err; } - err = iavf_check_reset_complete(hw); - if (err) { + ret = iavf_check_reset_complete(hw); + if (ret) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); + ret); goto err; } hw->aq.num_arq_entries = IAVF_AQ_LEN; @@ -1678,20 +2295,22 @@ static int iavf_startup(struct iavf_adapter *adapter) hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - err = iavf_init_adminq(hw); - if (err) { - dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); + status = iavf_init_adminq(hw); + if (status) { + dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", + status); goto err; } - err = iavf_send_api_ver(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); + ret = iavf_send_api_ver(adapter); + if (ret) { + dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret); iavf_shutdown_adminq(hw); goto err; } - adapter->state = __IAVF_INIT_VERSION_CHECK; + iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); + return; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1700,9 +2319,9 @@ err: * * Function process __IAVF_INIT_VERSION_CHECK driver state. 
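[Illustration, not part of the patch] iavf_startup() above sets the pattern for every init step in this series: on success advance to the next state, on failure drop to __IAVF_INIT_FAILED and let the watchdog retry from last_state. A hedged sketch of that recovery loop (state names shortened, not the driver's enum):

#include <stdio.h>

enum state { ST_STARTUP, ST_VERSION_CHECK, ST_GET_RESOURCES, ST_FAILED };

static enum state state = ST_STARTUP, last_state;

static void change_state(enum state next)
{
	last_state = state;	/* remembered for the retry path */
	state = next;
}

static void step(int ok, enum state next)
{
	change_state(ok ? next : ST_FAILED);
}

int main(void)
{
	step(1, ST_VERSION_CHECK);	/* startup succeeded */
	step(0, ST_GET_RESOURCES);	/* version check failed */
	if (state == ST_FAILED) {
		printf("retrying from state %d\n", last_state);
		change_state(last_state);	/* watchdog retry */
	}
	return 0;
}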
* When success the state is changed to __IAVF_INIT_GET_RESOURCES - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_init_version_check(struct iavf_adapter *adapter) +static void iavf_init_version_check(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; @@ -1713,14 +2332,14 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); goto err; } /* aq msg sent, awaiting reply */ err = iavf_verify_api_ver(adapter); if (err) { - if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) + if (err == -EALREADY) err = iavf_send_api_ver(adapter); else dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", @@ -1736,10 +2355,62 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) err); goto err; } - adapter->state = __IAVF_INIT_GET_RESOURCES; - + iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); + return; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES + * @adapter: board private structure + */ +int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) +{ + int i, num_req_queues = adapter->num_req_queues; + struct iavf_vsi *vsi = &adapter->vsi; + + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (num_req_queues && + num_req_queues > adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. Need a reset to see if we can't + * get back to a working state. + */ + dev_err(&adapter->pdev->dev, + "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, + adapter->vsi_res->num_queue_pairs); + adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + iavf_schedule_reset(adapter); + + return -EAGAIN; + } + adapter->num_req_queues = 0; + adapter->vsi.id = adapter->vsi_res->vsi_id; + + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + vsi->netdev = adapter->netdev; + vsi->qs_handle = adapter->vsi_res->qset_handle; + if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + adapter->rss_key_size = adapter->vf_res->rss_key_size; + adapter->rss_lut_size = adapter->vf_res->rss_lut_size; + } else { + adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; + } + + return 0; } /** @@ -1749,45 +2420,169 @@ err: * Function process __IAVF_INIT_GET_RESOURCES driver state and * finishes driver initialization procedure. 
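[Illustration, not part of the patch] The queue check in iavf_parse_vf_resource_msg() below is a negotiate-and-retry loop in miniature: if the PF granted fewer queue pairs than requested, remember the granted count and return -EAGAIN so the caller schedules a reset and asks again with a number the PF can satisfy. A hedged sketch of just that comparison:

#include <errno.h>
#include <stdio.h>

static int check_granted_queues(int requested, int granted,
				int *num_req_queues)
{
	if (requested && requested > granted) {
		fprintf(stderr,
			"Requested %d queues, but PF only gave us %d.\n",
			requested, granted);
		*num_req_queues = granted;	/* retry with what exists */
		return -EAGAIN;			/* caller schedules reset */
	}
	*num_req_queues = 0;			/* request satisfied */
	return 0;
}

int main(void)
{
	int num_req_queues = 16;

	if (check_granted_queues(num_req_queues, 8, &num_req_queues) ==
	    -EAGAIN)
		printf("will reset and retry with %d queues\n",
		       num_req_queues);
	return 0;
}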
* When success the state is changed to __IAVF_DOWN - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_init_get_resources(struct iavf_adapter *adapter) +static void iavf_init_get_resources(struct iavf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; - int err = 0, bufsz; + int err; WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { - bufsz = sizeof(struct virtchnl_vf_resource) + - (IAVF_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource)); - adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); - if (!adapter->vf_res) + adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, + GFP_KERNEL); + if (!adapter->vf_res) { + err = -ENOMEM; goto err; + } } err = iavf_get_vf_config(adapter); - if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { + if (err == -EALREADY) { err = iavf_send_vf_config_msg(adapter); goto err; - } else if (err == IAVF_ERR_PARAM) { - /* We only get ERR_PARAM if the device is in a very bad + } else if (err == -EINVAL) { + /* We only get -EINVAL if the device is in a very bad * state or if we've been disabled for previous bad * behavior. Either way, we're done now. */ iavf_shutdown_adminq(hw); dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); - return 0; + return; } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); goto err_alloc; } - if (iavf_process_config(adapter)) + err = iavf_parse_vf_resource_msg(adapter); + if (err) { + dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", + err); goto err_alloc; + } + /* Some features require additional messages to negotiate extended + * capabilities. These are processed in sequence by the + * __IAVF_INIT_EXTENDED_CAPS driver state. + */ + adapter->extended_caps = IAVF_EXTENDED_CAPS; + + iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS); + return; + +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps + * @adapter: board private structure + * + * Function processes send of the extended VLAN V2 capability message to the + * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent, + * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2. + */ +static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter) +{ + int ret; + + WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2)); + + ret = iavf_send_vf_offload_vlan_v2_msg(adapter); + if (ret && ret == -EOPNOTSUPP) { + /* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case, + * we did not send the capability exchange message and do not + * expect a response. + */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; + } + + /* We sent the message, so move on to the next step */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2; +} + +/** + * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps + * @adapter: board private structure + * + * Function processes receipt of the extended VLAN V2 capability message from + * the PF. 
+ **/ +static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter) +{ + int ret; + + WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2)); + + memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps)); + + ret = iavf_get_vf_vlan_v2_caps(adapter); + if (ret) + goto err; + + /* We've processed receipt of the VLAN V2 caps message */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; + return; +err: + /* We didn't receive a reply. Make sure we try sending again when + * __IAVF_INIT_FAILED attempts to recover. + */ + adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2; + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_process_extended_caps - Part of driver startup + * @adapter: board private structure + * + * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state + * handles negotiating capabilities for features which require an additional + * message. + * + * Once all extended capabilities exchanges are finished, the driver will + * transition into __IAVF_INIT_CONFIG_ADAPTER. + */ +static void iavf_init_process_extended_caps(struct iavf_adapter *adapter) +{ + WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS); + + /* Process capability exchange for VLAN V2 */ + if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) { + iavf_init_send_offload_vlan_v2_caps(adapter); + return; + } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) { + iavf_init_recv_offload_vlan_v2_caps(adapter); + return; + } + + /* When we reach here, no further extended capabilities exchanges are + * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER + */ + iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); +} + +/** + * iavf_init_config_adapter - last part of driver startup + * @adapter: board private structure + * + * After all the supported capabilities are negotiated, then the + * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization. 
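[Illustration, not part of the patch] The extended-caps machinery just above drives each optional negotiation as a pair of bits, SEND then RECV, handled one per watchdog pass until the mask is empty. A minimal sketch of that sequencing (bit names invented; the send step clears the RECV bit instead when the PF lacks the capability):

#include <stdint.h>
#include <stdio.h>

#define CAP_SEND_VLAN_V2 (1u << 0)
#define CAP_RECV_VLAN_V2 (1u << 1)

static uint32_t extended_caps = CAP_SEND_VLAN_V2 | CAP_RECV_VLAN_V2;

/* one negotiation step per invocation, like one watchdog pass */
static int process_extended_caps(void)
{
	if (extended_caps & CAP_SEND_VLAN_V2) {
		puts("send VLAN_V2 caps request");
		extended_caps &= ~CAP_SEND_VLAN_V2;
		return 1;
	}
	if (extended_caps & CAP_RECV_VLAN_V2) {
		puts("receive VLAN_V2 caps reply");
		extended_caps &= ~CAP_RECV_VLAN_V2;
		return 1;
	}
	puts("all extended caps negotiated; configure adapter");
	return 0;
}

int main(void)
{
	while (process_extended_caps())
		;
	return 0;
}

New optional capabilities can be added later by defining two more bits, without growing the state enum itself.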
+ */ +static void iavf_init_config_adapter(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + + WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER); + + if (iavf_process_config(adapter)) + goto err; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; @@ -1806,10 +2601,12 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } + adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET; + adapter->tx_desc_count = IAVF_DEFAULT_TXD; adapter->rx_desc_count = IAVF_DEFAULT_RXD; err = iavf_init_interrupt_scheme(adapter); @@ -1844,17 +2641,15 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) netif_tx_stop_all_queues(netdev); if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_add_device(adapter); - if (err) { - rtnl_unlock(); + if (err) dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", err); - } } dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); rtnl_unlock(); @@ -1863,25 +2658,28 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); - if (!adapter->rss_key || !adapter->rss_lut) + if (!adapter->rss_key || !adapter->rss_lut) { + err = -ENOMEM; goto err_mem; + } if (RSS_AQ(adapter)) adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; else iavf_init_rss(adapter); - return err; + if (VLAN_V2_ALLOWED(adapter)) + /* request initial VLAN offload settings */ + iavf_set_vlan_offload_features(adapter, 0, netdev->features); + + return; err_mem: iavf_free_rss(adapter); err_register: iavf_free_misc_irq(adapter); err_sw_init: iavf_reset_interrupt_capability(adapter); -err_alloc: - kfree(adapter->vf_res); - adapter->vf_res = NULL; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1896,14 +2694,92 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; u32 reg_val; - if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + goto restart_watchdog; + } if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) - adapter->state = __IAVF_COMM_FAILED; + iavf_change_state(adapter, __IAVF_COMM_FAILED); + + if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + mutex_unlock(&adapter->crit_lock); + queue_work(iavf_wq, &adapter->reset_task); + return; + } switch (adapter->state) { + case __IAVF_STARTUP: + iavf_startup(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case __IAVF_INIT_VERSION_CHECK: + iavf_init_version_check(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case __IAVF_INIT_GET_RESOURCES: + iavf_init_get_resources(adapter); + 
mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_EXTENDED_CAPS: + iavf_init_process_extended_caps(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_CONFIG_ADAPTER: + iavf_init_config_adapter(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_FAILED: + if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Do not update the state and do not reschedule + * watchdog task, iavf_remove should handle this state + * as it can loop forever + */ + mutex_unlock(&adapter->crit_lock); + return; + } + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { + dev_err(&adapter->pdev->dev, + "Failed to communicate with PF; waiting before retry\n"); + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; + iavf_shutdown_adminq(hw); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, (5 * HZ)); + return; + } + /* Try again from failed step*/ + iavf_change_state(adapter, adapter->last_state); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); + return; case __IAVF_COMM_FAILED: + if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Set state to __IAVF_INIT_FAILED and perform remove + * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task + * doesn't bring the state back to __IAVF_COMM_FAILED. + */ + iavf_change_state(adapter, __IAVF_INIT_FAILED); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + mutex_unlock(&adapter->crit_lock); + return; + } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE || @@ -1911,28 +2787,22 @@ static void iavf_watchdog_task(struct work_struct *work) /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __IAVF_STARTUP; - adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; - queue_delayed_work(iavf_wq, &adapter->init_task, 10); - clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); - /* Don't reschedule the watchdog, since we've restarted - * the init task. When init_task contacts the PF and + /* When init task contacts the PF and * gets everything set up again, it'll restart the * watchdog for us. Down, boy. Sit. Stay. Woof. 
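[Illustration, not part of the patch] The new watchdog entry above never blocks on crit_lock: if the trylock fails it either bails out (remove path owns the lock) or simply reschedules itself. A hedged pthread sketch of that non-blocking guard; queue_delayed_work() is modeled by the caller's loop:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t crit_lock = PTHREAD_MUTEX_INITIALIZER;
static bool removing;

/* returns true when the worker should be re-queued by the caller */
static bool watchdog_pass(void)
{
	if (pthread_mutex_trylock(&crit_lock) != 0) {
		if (removing)
			return false;	/* remove path owns the lock */
		return true;		/* busy: retry on next tick */
	}
	puts("watchdog: lock held, doing periodic work");
	pthread_mutex_unlock(&crit_lock);
	return true;
}

int main(void)
{
	for (int tick = 0; tick < 3; tick++)	/* stand-in for requeueing */
		if (!watchdog_pass())
			break;
	return 0;
}

Sleeping in the work item while holding nothing is what lets iavf_remove() take the lock and tear everything down without deadlocking against its own workqueue.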
*/ - return; + iavf_change_state(adapter, __IAVF_STARTUP); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; - clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); - goto watchdog_done; + return; case __IAVF_RESETTING: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); return; case __IAVF_DOWN: @@ -1946,45 +2816,58 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_send_api_ver(adapter); } } else { - if (!iavf_process_aq_command(adapter) && + int ret = iavf_process_aq_command(adapter); + + /* An error will be returned if no commands were + * processed; use this opportunity to update stats + * if the error isn't -EOPNOTSUPP + */ + if (ret && ret != -EOPNOTSUPP && adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } + if (adapter->state == __IAVF_RUNNING) + iavf_detect_recover_hung(&adapter->vsi); break; case __IAVF_REMOVE: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); - return; default: - goto restart_watchdog; + mutex_unlock(&adapter->crit_lock); + return; } - /* check for hw reset */ + /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { - adapter->state = __IAVF_RESETTING; adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); queue_work(iavf_wq, &adapter->reset_task); - goto watchdog_done; + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, HZ * 2); + return; } schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); -watchdog_done: - if (adapter->state == __IAVF_RUNNING || - adapter->state == __IAVF_COMM_FAILED) - iavf_detect_recover_hung(&adapter->vsi); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); restart_watchdog: + if (adapter->state >= __IAVF_DOWN) + queue_work(iavf_wq, &adapter->adminq_task); if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); else queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); - queue_work(iavf_wq, &adapter->adminq_task); } +/** + * iavf_disable_vf - disable VF + * @adapter: board private structure + * + * Set communication failed flag and free all resources. + * NOTE: This function is expected to be called with crit_lock being held.
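[Illustration, not part of the patch] The "check for hw reset" block above leans on one hardware property: when the PF resets the VF, the admin-queue enable bit reads back as zero, so a single register read tells the watchdog to abandon pending work and queue the reset task. A hedged userspace sketch (the MMIO read is faked; the real mask is IAVF_VF_ARQLEN1_ARQENABLE_MASK):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARQ_ENABLE_MASK (1u << 31)	/* illustrative bit position */

/* stand-in for an MMIO read of the ARQ length register */
static uint32_t rd32_arqlen(bool pf_reset_us)
{
	return pf_reset_us ? 0 : (ARQ_ENABLE_MASK | 64);
}

static void watchdog_check_reset(bool pf_reset_us)
{
	if (!(rd32_arqlen(pf_reset_us) & ARQ_ENABLE_MASK)) {
		/* bit cleared by a HW reset: drop pending AQ work and
		 * let the reset task rebuild state from scratch
		 */
		puts("Hardware reset detected -> queue reset task");
		return;
	}
	puts("admin queue still enabled; carry on");
}

int main(void)
{
	watchdog_check_reset(false);
	watchdog_check_reset(true);
	return 0;
}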
+ **/ static void iavf_disable_vf(struct iavf_adapter *adapter) { struct iavf_mac_filter *f, *ftmp; @@ -2034,20 +2917,17 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - iavf_free_queues(adapter); iavf_free_q_vectors(adapter); - kfree(adapter->vf_res); + iavf_free_queues(adapter); + memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } -#define IAVF_RESET_WAIT_MS 10 -#define IAVF_RESET_WAIT_COUNT 500 /** * iavf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct @@ -2065,20 +2945,28 @@ static void iavf_reset_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; - struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; + enum iavf_status status; u32 reg_val; int i = 0, err; bool running; + /* Detach interface to avoid subsequent NDO callbacks */ + rtnl_lock(); + netif_device_detach(netdev); + rtnl_unlock(); + /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) - return; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state != __IAVF_REMOVE) + queue_work(iavf_wq, &adapter->reset_task); + + goto reset_finish; + } - while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->client_lock)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | @@ -2101,20 +2989,20 @@ static void iavf_reset_task(struct work_struct *work) adapter->flags |= IAVF_FLAG_RESET_PENDING; /* poll until we see the reset actually happen */ - for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { + for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) break; usleep_range(5000, 10000); } - if (i == IAVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ } /* wait until the reset is complete and the PF is responding to us */ - for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { /* sleep first to make sure a minimum wait time is met */ msleep(IAVF_RESET_WAIT_MS); @@ -2125,12 +3013,14 @@ static void iavf_reset_task(struct work_struct *work) } pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); - if (i == IAVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -2139,18 +3029,16 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. 
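[Illustration, not part of the patch] The reset task above waits in two bounded phases, first polling until the reset is actually observed, then polling until the PF reports it complete, with different give-up behavior for each (pretend it happened vs. disable the VF). A hedged sketch of that structure; the predicates and bounds here are stand-ins for the register reads and IAVF_RESET_WAIT_*_COUNT values:

#include <stdio.h>

#define WAIT_DETECTED_COUNT 500		/* illustrative bounds */
#define WAIT_COMPLETE_COUNT 2000

static int hw_reset_seen(int t)     { return t > 3; }
static int hw_reset_complete(int t) { return t > 7; }

static int wait_for_reset(void)
{
	int i;

	for (i = 0; i < WAIT_DETECTED_COUNT; i++)	/* phase 1: starts */
		if (hw_reset_seen(i))
			break;
	if (i == WAIT_DETECTED_COUNT)
		puts("Never saw reset");	/* act as if it happened */

	for (i = 0; i < WAIT_COMPLETE_COUNT; i++)	/* phase 2: ends */
		if (hw_reset_complete(i))
			return 0;
	return -1;	/* dead, Jim: caller disables the VF */
}

int main(void)
{
	printf("reset %s\n",
	       wait_for_reset() ? "never finished" : "complete");
	return 0;
}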
*/ - running = ((adapter->state == __IAVF_RUNNING) || - (adapter->state == __IAVF_RESETTING)); + running = adapter->state == __IAVF_RUNNING; if (running) { netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); adapter->link_up = false; iavf_napi_disable_all(adapter); } iavf_irq_disable(adapter); - adapter->state = __IAVF_RESETTING; + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just @@ -2163,19 +3051,37 @@ continue_reset: /* kill and reinit the admin queue */ iavf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; - err = iavf_init_adminq(hw); - if (err) + status = iavf_init_adminq(hw); + if (status) { dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", - err); + status); + goto reset_err; + } adapter->aq_required = 0; - if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_reinit_interrupt_scheme(adapter); if (err) goto reset_err; } + if (RSS_AQ(adapter)) { + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; + } else { + err = iavf_init_rss(adapter); + if (err) + goto reset_err; + } + adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; + /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been + * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here, + * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until + * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have + * been successfully sent and negotiated + */ + adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -2194,11 +3100,6 @@ continue_reset: list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - spin_unlock_bh(&adapter->mac_vlan_list_lock); /* check if TCs are running and re-add all cloud filters */ @@ -2212,10 +3113,12 @@ continue_reset: spin_unlock_bh(&adapter->cloud_filter_list_lock); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); + bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID); + bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); /* We were running when the reset started, so we need to restore some * state here. 
mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); + + goto reset_finish; reset_err: - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (running) { + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); + iavf_free_traffic_irqs(adapter); + } + iavf_disable_vf(adapter); + + mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - iavf_close(netdev); +reset_finish: + rtnl_lock(); + netif_device_attach(netdev); + rtnl_unlock(); } /** @@ -2278,6 +3197,14 @@ static void iavf_adminq_task(struct work_struct *work) if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto out; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + + queue_work(iavf_wq, &adapter->adminq_task); + goto out; + } + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) @@ -2296,7 +3223,26 @@ static void iavf_adminq_task(struct work_struct *work) if (pending != 0) memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); + mutex_unlock(&adapter->crit_lock); + + if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { + if (adapter->netdev_registered || + !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + netdev_update_features(netdev); + rtnl_unlock(); + /* Request VLAN offload settings */ + if (VLAN_V2_ALLOWED(adapter)) + iavf_set_vlan_offload_features + (adapter, 0, netdev->features); + + iavf_set_queue_vlan_tag_loc(adapter); + } + adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; + } if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || adapter->state == __IAVF_RESETTING) @@ -2304,7 +3250,7 @@ static void iavf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); - if (val == 0xdeadbeef) /* indicates device in reset */ + if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { @@ -2362,7 +3308,7 @@ static void iavf_client_task(struct work_struct *work) * later. 
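[Illustration, not part of the patch] The adminq task above now treats two register values as sentinels: 0xdeadbeef (device mid-reset) and the new 0xffffffff, the all-ones pattern a PCIe read returns when the device has fallen off the bus. A hedged sketch of that filter:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* values that cannot be a valid ARQ length: device is not really there */
static bool register_read_is_bogus(uint32_t val)
{
	return val == 0xdeadbeef ||	/* device is mid-reset */
	       val == 0xffffffff;	/* PCIe all-ones: device gone */
}

int main(void)
{
	uint32_t samples[] = { 0x40, 0xdeadbeef, 0xffffffff };

	for (unsigned int i = 0;
	     i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> %s\n", samples[i],
		       register_read_is_bogus(samples[i]) ?
		       "skip" : "process");
	return 0;
}

Checking for all-ones before acting on error bits avoids chasing phantom "errors" that are really a surprise-removed device.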
*/ - if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) + if (!mutex_trylock(&adapter->client_lock)) return; if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { @@ -2385,7 +3331,7 @@ static void iavf_client_task(struct work_struct *work) adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; } out: - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); } /** @@ -2487,29 +3433,46 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, { int speed = 0, ret = 0; + if (ADV_LINK_SUPPORT(adapter)) { + if (adapter->link_speed_mbps < U32_MAX) { + speed = adapter->link_speed_mbps; + goto validate_bw; + } else { + dev_err(&adapter->pdev->dev, "Unknown link speed\n"); + return -EINVAL; + } + } + switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: - speed = 40000; + case VIRTCHNL_LINK_SPEED_40GB: + speed = SPEED_40000; break; - case IAVF_LINK_SPEED_25GB: - speed = 25000; + case VIRTCHNL_LINK_SPEED_25GB: + speed = SPEED_25000; break; - case IAVF_LINK_SPEED_20GB: - speed = 20000; + case VIRTCHNL_LINK_SPEED_20GB: + speed = SPEED_20000; break; - case IAVF_LINK_SPEED_10GB: - speed = 10000; + case VIRTCHNL_LINK_SPEED_10GB: + speed = SPEED_10000; break; - case IAVF_LINK_SPEED_1GB: - speed = 1000; + case VIRTCHNL_LINK_SPEED_5GB: + speed = SPEED_5000; break; - case IAVF_LINK_SPEED_100MB: - speed = 100; + case VIRTCHNL_LINK_SPEED_2_5GB: + speed = SPEED_2500; + break; + case VIRTCHNL_LINK_SPEED_1GB: + speed = SPEED_1000; + break; + case VIRTCHNL_LINK_SPEED_100MB: + speed = SPEED_100; break; default: break; } +validate_bw: if (max_tx_rate > speed) { dev_err(&adapter->pdev->dev, "Invalid tx rate specified\n"); @@ -2520,7 +3483,7 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, } /** - * iavf_validate_channel_config - validate queue mapping info + * iavf_validate_ch_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters * @@ -2532,6 +3495,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 total_max_rate = 0; + u32 tx_rate_rem = 0; int i, num_qps = 0; u64 tx_rate = 0; int ret = 0; @@ -2546,25 +3510,48 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, return -EINVAL; if (mqprio_qopt->min_rate[i]) { dev_err(&adapter->pdev->dev, - "Invalid min tx rate (greater than 0) specified\n"); + "Invalid min tx rate (greater than 0) specified for TC%d\n", + i); return -EINVAL; } - /*convert to Mbps */ + + /* convert to Mbps */ tx_rate = div_u64(mqprio_qopt->max_rate[i], IAVF_MBPS_DIVISOR); + + if (mqprio_qopt->max_rate[i] && + tx_rate < IAVF_MBPS_QUANTA) { + dev_err(&adapter->pdev->dev, + "Invalid max tx rate for TC%d, minimum %dMbps\n", + i, IAVF_MBPS_QUANTA); + return -EINVAL; + } + + (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem); + + if (tx_rate_rem != 0) { + dev_err(&adapter->pdev->dev, + "Invalid max tx rate for TC%d, not divisible by %d\n", + i, IAVF_MBPS_QUANTA); + return -EINVAL; + } + total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > IAVF_MAX_REQ_QUEUES) + if (num_qps > adapter->num_active_queues) { + dev_err(&adapter->pdev->dev, + "Cannot support requested number of queues\n"); return -EINVAL; + } ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); return ret; } /** - * iavf_del_all_cloud_filters - delete all cloud filters - * on the traffic classes + * iavf_del_all_cloud_filters - delete all cloud filters on the traffic 
classes + * @adapter: board private structure **/ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) { @@ -2583,7 +3570,7 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) /** * __iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure - * @type_date: tc offload data + * @type_data: tc offload data * * This function processes the config information provided by the * user to configure traffic classes/queue channels and packages the @@ -2615,6 +3602,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) netif_tx_disable(netdev); iavf_del_all_cloud_filters(adapter); adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; + total_qps = adapter->orig_num_active_queues; goto exit; } else { return -EINVAL; @@ -2658,7 +3646,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) adapter->ch_config.ch_info[i].offset = 0; } } + + /* Take snapshot of original config such as "num_active_queues" + * It is used later when delete ADQ flow is exercised, so that + * once delete ADQ flow completes, VF shall go back to its + * original queue configuration + */ + + adapter->orig_num_active_queues = adapter->num_active_queues; + + /* Store queue info based on TC so that VF gets configured + * with correct number of queues when VF completes ADQ config + * flow + */ adapter->ch_config.total_qps = total_qps; + netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; @@ -2675,13 +3677,19 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) } } exit: + if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) + return 0; + + netif_set_real_num_rx_queues(netdev, total_qps); + netif_set_real_num_tx_queues(netdev, total_qps); + return ret; } /** * iavf_parse_cls_flower - Parse tc flower filters provided by kernel * @adapter: board private structure - * @cls_flower: pointer to struct flow_cls_offload + * @f: pointer to struct flow_cls_offload * @filter: pointer to cloud filter structure */ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, @@ -2757,7 +3765,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2767,7 +3775,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2802,7 +3810,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", match.mask->vlan_id); - return IAVF_ERR_CONFIG; + return -EINVAL; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); @@ -2826,7 +3834,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2836,13 +3844,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= 
cpu_to_be32(0xffffffff); @@ -2863,7 +3871,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); - return IAVF_ERR_CONFIG; + return -EINVAL; } /* src and dest IPv6 address should not be LOOPBACK @@ -2873,7 +3881,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@ -2898,7 +3906,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(match.mask->src)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2908,7 +3916,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (match.key->dst) { @@ -2951,6 +3959,29 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, } /** + * iavf_find_cf - Find the cloud filter in the list + * @adapter: Board private structure + * @cookie: filter specific cookie + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * cloud_filter_list_lock. + */ +static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, + unsigned long *cookie) +{ + struct iavf_cloud_filter *filter = NULL; + + if (!cookie) + return NULL; + + list_for_each_entry(filter, &adapter->cloud_filter_list, list) { + if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) + return filter; + } + return NULL; +} + +/** * iavf_configure_clsflower - Add tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct flow_cls_offload @@ -2971,25 +4002,35 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, if (!filter) return -ENOMEM; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - if (--count == 0) - goto err; + while (!mutex_trylock(&adapter->crit_lock)) { + if (--count == 0) { + kfree(filter); + return err; + } udelay(1); } filter->cookie = cls_flower->cookie; + /* bail out here if filter already exists */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + if (iavf_find_cf(adapter, &cls_flower->cookie)) { + dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n"); + err = -EEXIST; + goto spin_unlock; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + /* set the mask to all zeroes to begin with */ memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; err = iavf_parse_cls_flower(adapter, cls_flower, filter); - if (err < 0) + if (err) goto err; err = iavf_handle_tclass(adapter, tc, filter); - if (err < 0) + if (err) goto err; /* add filter to the list */ @@ -2998,37 +4039,16 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, adapter->num_cloud_filters++; filter->add = true; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; +spin_unlock: spin_unlock_bh(&adapter->cloud_filter_list_lock); err: if (err) kfree(filter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } -/* iavf_find_cf - Find the cloud filter in the list - * 
@adapter: Board private structure - * @cookie: filter specific cookie - * - * Returns ptr to the filter object or NULL. Must be called while holding the - * cloud_filter_list_lock. - */ -static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, - unsigned long *cookie) -{ - struct iavf_cloud_filter *filter = NULL; - - if (!cookie) - return NULL; - - list_for_each_entry(filter, &adapter->cloud_filter_list, list) { - if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) - return filter; - } - return NULL; -} - /** * iavf_delete_clsflower - Remove tc flower filters * @adapter: board private structure @@ -3055,15 +4075,12 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter, /** * iavf_setup_tc_cls_flower - flower classifier offloads - * @netdev: net device to configure - * @type_data: offload data + * @adapter: board private structure + * @cls_flower: pointer to flow_cls_offload struct with flow info */ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) { - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case FLOW_CLS_REPLACE: return iavf_configure_clsflower(adapter, cls_flower); @@ -3087,6 +4104,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + struct iavf_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return iavf_setup_tc_cls_flower(cb_priv, type_data); @@ -3101,7 +4123,7 @@ static LIST_HEAD(iavf_block_cb_list); * iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type: type of offload - * @type_date: tc offload data + * @type_data: tc offload data * * This function is the callback to ndo_setup_tc in the * netdev_ops. @@ -3148,15 +4170,30 @@ static int iavf_open(struct net_device *netdev) return -EIO; } - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) { + /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock + * is already taken and iavf_open is called from an upper + * device's notifier reacting on NETDEV_REGISTER event. + * We have to leave here to avoid dead lock. 
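Note on the locking change above: iavf_open() now spins on mutex_trylock() against the new crit_lock and backs off with a short sleep, returning -EBUSY when the init path already holds the lock (the __IAVF_INIT_CONFIG_ADAPTER case) so a notifier-driven open cannot deadlock against probe. A minimal userspace sketch of that try-lock-and-back-off pattern, with pthreads standing in for the kernel mutex and the state check purely illustrative:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    enum state { INIT_CONFIG, RUNNING };

    /* illustrative stand-ins for adapter->crit_lock and adapter->state */
    static pthread_mutex_t crit_lock = PTHREAD_MUTEX_INITIALIZER;
    static enum state adapter_state = RUNNING;

    static int open_with_backoff(void)
    {
        while (pthread_mutex_trylock(&crit_lock) != 0) {
            /* the init path holds the lock and may call back into
             * open: bail out instead of deadlocking
             */
            if (adapter_state == INIT_CONFIG)
                return -EBUSY;
            usleep(750); /* mirrors usleep_range(500, 1000) */
        }
        /* ... critical section: queue and resource setup ... */
        pthread_mutex_unlock(&crit_lock);
        return 0;
    }

    int main(void)
    {
        printf("open -> %d\n", open_with_backoff());
        return 0;
    }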
+ */ + if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) + return -EBUSY; + usleep_range(500, 1000); + } if (adapter->state != __IAVF_DOWN) { err = -EBUSY; goto err_unlock; } + if (adapter->state == __IAVF_RUNNING && + !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { + dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); + err = 0; + goto err_unlock; + } + /* allocate transmit descriptors */ err = iavf_setup_all_tx_resources(adapter); if (err) @@ -3178,13 +4215,16 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* Restore VLAN filters that were removed with IFF_DOWN */ + iavf_restore_filters(adapter); + iavf_configure(adapter); iavf_up_complete(adapter); iavf_irq_enable(adapter, true); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return 0; @@ -3196,7 +4236,7 @@ err_setup_rx: err_setup_tx: iavf_free_all_tx_resources(adapter); err_unlock: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } @@ -3215,24 +4255,48 @@ err_unlock: static int iavf_close(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); + u64 aq_to_restore; int status; - if (adapter->state <= __IAVF_DOWN_PENDING) - return 0; + mutex_lock(&adapter->crit_lock); - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) - usleep_range(500, 1000); + if (adapter->state <= __IAVF_DOWN_PENDING) { + mutex_unlock(&adapter->crit_lock); + return 0; + } set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; + /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before + * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl + * deadlock with adminq_task() until iavf_close timeouts. We must send + * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make + * disable queues possible for vf. Give only necessary flags to + * iavf_down and save other to set them right before iavf_close() + * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and + * iavf will be in DOWN state. + */ + aq_to_restore = adapter->aq_required; + adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG; + + /* Remove flags which we do not want to send after close or we want to + * send before disable queues. + */ + aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG | + IAVF_FLAG_AQ_ENABLE_QUEUES | + IAVF_FLAG_AQ_CONFIGURE_QUEUES | + IAVF_FLAG_AQ_ADD_VLAN_FILTER | + IAVF_FLAG_AQ_ADD_MAC_FILTER | + IAVF_FLAG_AQ_ADD_CLOUD_FILTER | + IAVF_FLAG_AQ_ADD_FDIR_FILTER | + IAVF_FLAG_AQ_ADD_ADV_RSS_CFG); iavf_down(adapter); - adapter->state = __IAVF_DOWN_PENDING; + iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_free_traffic_irqs(adapter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. 
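The aq_required juggling added to iavf_close() above is plain bit arithmetic: snapshot the pending-work mask, keep only GET_CONFIG across the down sequence, strip the add-type flags from the snapshot, and OR the remainder back once DISABLE_QUEUES has been sent. A compilable reduction (the flag values here are hypothetical; only the save/mask/restore pattern is taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical flag values; only the masking pattern matters */
    #define AQ_GET_CONFIG     (1u << 0)
    #define AQ_ENABLE_QUEUES  (1u << 1)
    #define AQ_ADD_MAC_FILTER (1u << 2)
    #define AQ_GET_STATS      (1u << 4)

    int main(void)
    {
        uint32_t aq_required = AQ_ENABLE_QUEUES | AQ_ADD_MAC_FILTER | AQ_GET_STATS;
        uint32_t aq_to_restore;

        aq_to_restore = aq_required;        /* snapshot */
        aq_required &= AQ_GET_CONFIG;       /* keep only what down needs */
        aq_to_restore &= ~(AQ_GET_CONFIG |  /* drop flags unsafe to replay */
                           AQ_ENABLE_QUEUES |
                           AQ_ADD_MAC_FILTER);

        /* ... iavf_down() runs and DISABLE_QUEUES is sent ... */

        aq_required |= aq_to_restore;       /* restore the deferred work */
        printf("aq_required after close: 0x%x\n", aq_required); /* 0x10 */
        return 0;
    }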
Resources are cleared in @@ -3250,6 +4314,10 @@ static int iavf_close(struct net_device *netdev) msecs_to_jiffies(500)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); + + mutex_lock(&adapter->crit_lock); + adapter->aq_required |= aq_to_restore; + mutex_unlock(&adapter->crit_lock); return 0; } @@ -3264,17 +4332,27 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { struct iavf_adapter *adapter = netdev_priv(netdev); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { iavf_notify_client_l2_params(&adapter->vsi); adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + queue_work(iavf_wq, &adapter->reset_task); + } return 0; } +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + /** * iavf_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted @@ -3286,20 +4364,11 @@ static int iavf_set_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when adapter is not capable - * of VLAN offload - */ - if (!VLAN_ALLOWED(adapter)) { - if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) - return -EINVAL; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - if (features & NETIF_F_HW_VLAN_CTAG_RX) - adapter->aq_required |= - IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - else - adapter->aq_required |= - IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } + /* trigger update on any VLAN feature change */ + if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^ + (features & NETIF_VLAN_OFFLOAD_FEATURES)) + iavf_set_vlan_offload_features(adapter, netdev->features, + features); return 0; } @@ -3353,7 +4422,7 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb, } /* No need to validate L4LEN as TCP is the only protocol with a - * a flexible value and we support all possible values supported + * flexible value and we support all possible values supported * by TCP, which is at most 15 dwords */ @@ -3363,6 +4432,228 @@ out_err: } /** + * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off + * @adapter: board private structure + * + * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * were negotiated determine the VLAN features that can be toggled on and off. 
+ **/
+static netdev_features_t
+iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
+{
+	netdev_features_t hw_features = 0;
+
+	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
+		return hw_features;
+
+	/* Enable VLAN features if supported */
+	if (VLAN_ALLOWED(adapter)) {
+		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_CTAG_RX);
+	} else if (VLAN_V2_ALLOWED(adapter)) {
+		struct virtchnl_vlan_caps *vlan_v2_caps =
+			&adapter->vlan_v2_caps;
+		struct virtchnl_vlan_supported_caps *stripping_support =
+			&vlan_v2_caps->offloads.stripping_support;
+		struct virtchnl_vlan_supported_caps *insertion_support =
+			&vlan_v2_caps->offloads.insertion_support;
+
+		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+		    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+			if (stripping_support->outer &
+			    VIRTCHNL_VLAN_ETHERTYPE_8100)
+				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+			if (stripping_support->outer &
+			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
+				hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+		} else if (stripping_support->inner !=
+			   VIRTCHNL_VLAN_UNSUPPORTED &&
+			   stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+			if (stripping_support->inner &
+			    VIRTCHNL_VLAN_ETHERTYPE_8100)
+				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+		}
+
+		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+		    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+			if (insertion_support->outer &
+			    VIRTCHNL_VLAN_ETHERTYPE_8100)
+				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+			if (insertion_support->outer &
+			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
+				hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+		} else if (insertion_support->inner &&
+			   insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+			if (insertion_support->inner &
+			    VIRTCHNL_VLAN_ETHERTYPE_8100)
+				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+
+	return hw_features;
+}
+
+/**
+ * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
+ * @adapter: board private structure
+ *
+ * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * were negotiated determine the VLAN features that are enabled by default.
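The VLAN_V2 branch above turns negotiated capability bits into toggleable netdev features, always preferring outer-tag support and falling back to inner only when outer is unsupported. A standalone reduction of the stripping half of that mapping (bit names simplified from the virtchnl definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* simplified stand-ins for the virtchnl capability bits */
    #define VLAN_TOGGLE         (1u << 0)
    #define VLAN_ETHERTYPE_8100 (1u << 1) /* CTAG */
    #define VLAN_ETHERTYPE_88A8 (1u << 2) /* STAG */

    #define F_CTAG_RX (1u << 0)
    #define F_STAG_RX (1u << 1)

    /* mirror the "outer wins, inner only as fallback" stripping rule */
    static uint32_t strip_features(uint32_t outer, uint32_t inner)
    {
        uint32_t features = 0;

        if (outer & VLAN_TOGGLE) {
            if (outer & VLAN_ETHERTYPE_8100)
                features |= F_CTAG_RX;
            if (outer & VLAN_ETHERTYPE_88A8)
                features |= F_STAG_RX;
        } else if (inner & VLAN_TOGGLE) {
            if (inner & VLAN_ETHERTYPE_8100)
                features |= F_CTAG_RX;
        }
        return features;
    }

    int main(void)
    {
        /* outer toggles both ethertypes: CTAG and STAG strip offered (0x3) */
        printf("0x%x\n", strip_features(VLAN_TOGGLE | VLAN_ETHERTYPE_8100 |
                                        VLAN_ETHERTYPE_88A8, 0));
        /* only inner toggles: CTAG strip only (0x1) */
        printf("0x%x\n", strip_features(0, VLAN_TOGGLE | VLAN_ETHERTYPE_8100));
        return 0;
    }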
+ **/ +static netdev_features_t +iavf_get_netdev_vlan_features(struct iavf_adapter *adapter) +{ + netdev_features_t features = 0; + + if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) + return features; + + if (VLAN_ALLOWED(adapter)) { + features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_caps *vlan_v2_caps = + &adapter->vlan_v2_caps; + struct virtchnl_vlan_supported_caps *filtering_support = + &vlan_v2_caps->filtering.filtering_support; + struct virtchnl_vlan_supported_caps *stripping_support = + &vlan_v2_caps->offloads.stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support = + &vlan_v2_caps->offloads.insertion_support; + u32 ethertype_init; + + /* give priority to outer stripping and don't support both outer + * and inner stripping + */ + ethertype_init = vlan_v2_caps->offloads.ethertype_init; + if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_RX; + else if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_RX; + } else if (stripping_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (stripping_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + /* give priority to outer insertion and don't support both outer + * and inner insertion + */ + if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_TX; + } else if (insertion_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (insertion_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_TX; + } + + /* give priority to outer filtering and don't bother if both + * outer and inner filtering are enabled + */ + ethertype_init = vlan_v2_caps->filtering.ethertype_init; + if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (filtering_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (filtering_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } else if (filtering_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (filtering_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (filtering_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } + } + + return features; +} + +#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \ + (!(((requested) & (feature_bit)) && \ + !((allowed) & (feature_bit)))) + +/** + * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support + * @adapter: board private structure + * @requested_features: stack requested NETDEV 
features + **/ +static netdev_features_t +iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, + netdev_features_t requested_features) +{ + netdev_features_t allowed_features; + + allowed_features = iavf_get_netdev_vlan_hw_features(adapter) | + iavf_get_netdev_vlan_features(adapter); + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_TX)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_RX)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_TX)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_TX; + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_RX)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_FILTER)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_FILTER)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER; + + if ((requested_features & + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && + (requested_features & + (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) && + adapter->vlan_v2_caps.offloads.ethertype_match == + VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) { + netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); + requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX); + } + + return requested_features; +} + +/** * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits @@ -3374,12 +4665,7 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) - features &= ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - - return features; + return iavf_fix_netdev_vlan_features(adapter, features); } static const struct net_device_ops iavf_netdev_ops = { @@ -3410,7 +4696,7 @@ static int iavf_check_reset_complete(struct iavf_hw *hw) u32 rstat; int i; - for (i = 0; i < 100; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { rstat = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((rstat == VIRTCHNL_VFR_VFACTIVE) || @@ -3431,39 +4717,11 @@ static int iavf_check_reset_complete(struct iavf_hw *hw) int iavf_process_config(struct iavf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; - int i, num_req_queues = adapter->num_req_queues; + netdev_features_t hw_vlan_features, vlan_features; struct net_device *netdev = adapter->netdev; - struct iavf_vsi *vsi = &adapter->vsi; netdev_features_t hw_enc_features; netdev_features_t hw_features; - /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < vfres->num_vsis; i++) { - if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) - adapter->vsi_res = &vfres->vsi_res[i]; - } - if (!adapter->vsi_res) { - dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); - return -ENODEV; - } - - if (num_req_queues && - num_req_queues != 
adapter->vsi_res->num_queue_pairs) { - /* Problem. The PF gave us fewer queues than what we had - * negotiated in our request. Need a reset to see if we can't - * get back to a working state. - */ - dev_err(&adapter->pdev->dev, - "Requested %d queues, but PF only gave us %d.\n", - num_req_queues, - adapter->vsi_res->num_queue_pairs); - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; - adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; - iavf_schedule_reset(adapter); - return -ENODEV; - } - adapter->num_req_queues = 0; - hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -3507,17 +4765,19 @@ int iavf_process_config(struct iavf_adapter *adapter) */ hw_features = hw_enc_features; - /* Enable VLAN features if supported */ - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) - hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX); + /* get HW VLAN features that can be toggled */ + hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); + /* Enable cloud filter if ADQ is supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) hw_features |= NETIF_F_HW_TC; + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) + hw_features |= NETIF_F_GSO_UDP_L4; - netdev->hw_features |= hw_features; + netdev->hw_features |= hw_features | hw_vlan_features; + vlan_features = iavf_get_netdev_vlan_features(adapter); - netdev->features |= hw_features; + netdev->features |= hw_features | vlan_features; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; @@ -3542,93 +4802,29 @@ int iavf_process_config(struct iavf_adapter *adapter) netdev->features &= ~NETIF_F_GSO; } - adapter->vsi.id = adapter->vsi_res->vsi_id; - - adapter->vsi.back = adapter; - adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; - vsi->netdev = adapter->netdev; - vsi->qs_handle = adapter->vsi_res->qset_handle; - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - adapter->rss_key_size = vfres->rss_key_size; - adapter->rss_lut_size = vfres->rss_lut_size; - } else { - adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; - adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; - } - return 0; } /** - * iavf_init_task - worker thread to perform delayed initialization - * @work: pointer to work_struct containing our data - * - * This task completes the work that was begun in probe. Due to the nature - * of VF-PF communications, we may need to wait tens of milliseconds to get - * responses back from the PF. Rather than busy-wait in probe and bog down the - * whole system, we'll do it in a task so we can sleep. - * This task only runs during driver init. Once we've established - * communications with the PF driver and set up our netdev, the watchdog - * takes over. 
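With iavf_init_task removed (see the deletion that follows), the STARTUP -> VERSION_CHECK -> GET_RESOURCES progression it used to drive now runs from the watchdog. A self-contained sketch of that retrying state machine, with the PF exchanges stubbed out and the retry limit illustrative:

    #include <stdio.h>

    enum vf_state { STARTUP, VERSION_CHECK, GET_RESOURCES, RUNNING, FAILED };

    /* stubbed PF exchanges: return 0 on success, -1 to simulate a timeout */
    static int do_startup(void)       { return 0; }
    static int do_version_check(void) { return 0; }
    static int do_get_resources(void) { return 0; }

    int main(void)
    {
        enum vf_state state = STARTUP;
        int retries = 0;

        while (state != RUNNING && retries < 30) {
            int err = 0;

            switch (state) {
            case STARTUP:
                err = do_startup();
                if (!err) state = VERSION_CHECK;
                break;
            case VERSION_CHECK:
                err = do_version_check();
                if (!err) state = GET_RESOURCES;
                break;
            case GET_RESOURCES:
                err = do_get_resources();
                if (!err) state = RUNNING;
                break;
            default:
                break;
            }
            if (err)
                retries++; /* the watchdog would re-queue itself here */
        }
        printf("final state: %s\n", state == RUNNING ? "RUNNING" : "FAILED");
        return 0;
    }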
- **/ -static void iavf_init_task(struct work_struct *work) -{ - struct iavf_adapter *adapter = container_of(work, - struct iavf_adapter, - init_task.work); - struct iavf_hw *hw = &adapter->hw; - - switch (adapter->state) { - case __IAVF_STARTUP: - if (iavf_startup(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_VERSION_CHECK: - if (iavf_init_version_check(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_GET_RESOURCES: - if (iavf_init_get_resources(adapter) < 0) - goto init_failed; - return; - default: - goto init_failed; - } - - queue_delayed_work(iavf_wq, &adapter->init_task, - msecs_to_jiffies(30)); - return; -init_failed: - if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { - dev_err(&adapter->pdev->dev, - "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; - iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; - queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); - return; - } - queue_delayed_work(iavf_wq, &adapter->init_task, HZ); -} - -/** * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure **/ static void iavf_shutdown(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); if (netif_running(netdev)) iavf_close(netdev); + if (iavf_lock_timeout(&adapter->crit_lock, 5000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); /* Prevent the watchdog from running. */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; + mutex_unlock(&adapter->crit_lock); #ifdef CONFIG_PM pci_save_state(pdev); @@ -3661,12 +4857,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "DMA configuration failed: 0x%x\n", err); - goto err_dma; - } + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; } err = pci_request_regions(pdev, iavf_driver_name); @@ -3699,7 +4892,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); /* Call save state here because it relies on the adapter struct. 
*/ pci_save_state(pdev); @@ -3722,32 +4915,41 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ + mutex_init(&adapter->crit_lock); + mutex_init(&adapter->client_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); spin_lock_init(&adapter->mac_vlan_list_lock); spin_lock_init(&adapter->cloud_filter_list_lock); + spin_lock_init(&adapter->fdir_fltr_lock); + spin_lock_init(&adapter->adv_rss_lock); INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list); + INIT_LIST_HEAD(&adapter->fdir_list_head); + INIT_LIST_HEAD(&adapter->adv_rss_list_head); INIT_WORK(&adapter->reset_task, iavf_reset_task); INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); - queue_delayed_work(iavf_wq, &adapter->init_task, + queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ init_waitqueue_head(&adapter->down_waitqueue); + /* Setup the wait queue for indicating virtchannel events */ + init_waitqueue_head(&adapter->vc_waitqueue); + return 0; err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_pci_reg: err_dma: @@ -3755,24 +4957,20 @@ err_dma: return err; } -#ifdef CONFIG_PM /** * iavf_suspend - Power management suspend routine - * @pdev: PCI device information struct - * @state: unused + * @dev_d: device info pointer * * Called when the system (VM) is entering sleep/suspend. **/ -static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused iavf_suspend(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct iavf_adapter *adapter = netdev_priv(netdev); - int retval = 0; netif_device_detach(netdev); - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); if (netif_running(netdev)) { @@ -3783,41 +4981,25 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); - - retval = pci_save_state(pdev); - if (retval) - return retval; - - pci_disable_device(pdev); + mutex_unlock(&adapter->crit_lock); return 0; } /** * iavf_resume - Power management resume routine - * @pdev: PCI device information struct + * @dev_d: device info pointer * * Called when the system (VM) is resumed from sleep/suspend. **/ -static int iavf_resume(struct pci_dev *pdev) +static int __maybe_unused iavf_resume(struct device *dev_d) { - struct iavf_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = to_pci_dev(dev_d); + struct iavf_adapter *adapter; u32 err; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. 
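Probe now queues the watchdog directly (the init_task is gone) and staggers its first run by the PCI function number, so sibling VFs sharing a device don't all hit the PF at once. The stagger arithmetic from the queue_delayed_work() call above:

    #include <stdio.h>

    int main(void)
    {
        /* 5 ms per PCI function slot, wrapping every 8 functions */
        for (unsigned int devfn = 0; devfn < 10; devfn++)
            printf("devfn %u -> initial delay %u ms\n",
                   devfn, 5 * (devfn & 0x07));
        return 0;
    }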
- */ - pci_save_state(pdev); + adapter = iavf_pdev_to_adapter(pdev); - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n"); - return err; - } pci_set_master(pdev); rtnl_lock(); @@ -3836,12 +5018,11 @@ static int iavf_resume(struct pci_dev *pdev) queue_work(iavf_wq, &adapter->reset_task); - netif_device_attach(netdev); + netif_device_attach(adapter->netdev); return err; } -#endif /* CONFIG_PM */ /** * iavf_remove - Device Removal Routine * @pdev: PCI device information struct @@ -3853,21 +5034,46 @@ static int iavf_resume(struct pci_dev *pdev) **/ static void iavf_remove(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; + struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_adv_rss *rss, *rsstmp; struct iavf_mac_filter *f, *ftmp; struct iavf_cloud_filter *cf, *cftmp; struct iavf_hw *hw = &adapter->hw; int err; - /* Indicate we are in remove and not to run reset_task */ + + /* When reboot/shutdown is in progress no need to do anything + * as the adapter is already REMOVE state that was set during + * iavf_shutdown() callback. + */ + if (adapter->state == __IAVF_REMOVE) + return; + set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); - cancel_delayed_work_sync(&adapter->init_task); - cancel_work_sync(&adapter->reset_task); - cancel_delayed_work_sync(&adapter->client_task); + /* Wait until port initialization is complete. + * There are flows where register/unregister netdev may race. + */ + while (1) { + mutex_lock(&adapter->crit_lock); + if (adapter->state == __IAVF_RUNNING || + adapter->state == __IAVF_DOWN || + adapter->state == __IAVF_INIT_FAILED) { + mutex_unlock(&adapter->crit_lock); + break; + } + + mutex_unlock(&adapter->crit_lock); + usleep_range(500, 1000); + } + cancel_delayed_work_sync(&adapter->watchdog_task); + if (adapter->netdev_registered) { - unregister_netdev(netdev); + rtnl_lock(); + unregister_netdevice(netdev); adapter->netdev_registered = false; + rtnl_unlock(); } if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_del_device(adapter); @@ -3876,10 +5082,10 @@ static void iavf_remove(struct pci_dev *pdev) err); } - /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; - adapter->aq_required = 0; - adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + mutex_lock(&adapter->crit_lock); + dev_info(&adapter->pdev->dev, "Remove device\n"); + iavf_change_state(adapter, __IAVF_REMOVE); + iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. 
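The teardown added to iavf_remove() walks every filter list with list_for_each_entry_safe(), which caches the successor so the current entry can be unlinked and freed mid-iteration. The same guarantee, shown on a plain singly linked list (the structure is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct filter {
        int id;
        struct filter *next;
    };

    int main(void)
    {
        struct filter *head = NULL, *f, *ftmp;

        for (int i = 0; i < 3; i++) { /* build a small list */
            f = malloc(sizeof(*f));
            f->id = i;
            f->next = head;
            head = f;
        }

        /* "safe" walk: grab the successor before freeing the current node,
         * the same guarantee list_for_each_entry_safe() provides
         */
        for (f = head; f; f = ftmp) {
            ftmp = f->next;
            printf("freeing filter %d\n", f->id);
            free(f);
        }
        return 0;
    }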
*/ @@ -3887,17 +5093,24 @@ static void iavf_remove(struct pci_dev *pdev) iavf_request_reset(adapter); msleep(50); } + + iavf_misc_irq_disable(adapter); + /* Shut down all the garbage mashers on the detention level */ + cancel_work_sync(&adapter->reset_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->adminq_task); + cancel_delayed_work_sync(&adapter->client_task); + + adapter->aq_required = 0; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); - iavf_misc_irq_disable(adapter); iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); - cancel_delayed_work_sync(&adapter->watchdog_task); - - cancel_work_sync(&adapter->adminq_task); - iavf_free_rss(adapter); if (hw->aq.asq.count) @@ -3906,11 +5119,12 @@ static void iavf_remove(struct pci_dev *pdev) /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); + mutex_destroy(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); + mutex_destroy(&adapter->crit_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); - iavf_free_all_tx_resources(adapter); - iavf_free_all_rx_resources(adapter); iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -3936,6 +5150,21 @@ static void iavf_remove(struct pci_dev *pdev) } spin_unlock_bh(&adapter->cloud_filter_list_lock); + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { + list_del(&fdir->list); + kfree(fdir); + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, + list) { + list_del(&rss->list); + kfree(rss); + } + spin_unlock_bh(&adapter->adv_rss_lock); + free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); @@ -3943,16 +5172,15 @@ static void iavf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); + static struct pci_driver iavf_driver = { - .name = iavf_driver_name, - .id_table = iavf_pci_tbl, - .probe = iavf_probe, - .remove = iavf_remove, -#ifdef CONFIG_PM - .suspend = iavf_suspend, - .resume = iavf_resume, -#endif - .shutdown = iavf_shutdown, + .name = iavf_driver_name, + .id_table = iavf_pci_tbl, + .probe = iavf_probe, + .remove = iavf_remove, + .driver.pm = &iavf_pm_ops, + .shutdown = iavf_shutdown, }; /** @@ -3963,10 +5191,7 @@ static struct pci_driver iavf_driver = { **/ static int __init iavf_init_module(void) { - int ret; - - pr_info("iavf: %s - version %s\n", iavf_driver_string, - iavf_driver_version); + pr_info("iavf: %s\n", iavf_driver_string); pr_info("%s\n", iavf_copyright); @@ -3976,8 +5201,7 @@ static int __init iavf_init_module(void) pr_err("%s: Failed to create workqueue\n", iavf_driver_name); return -ENOMEM; } - ret = pci_register_driver(&iavf_driver); - return ret; + return pci_register_driver(&iavf_driver); } module_init(iavf_init_module); diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h index 46e3d1f6b604..2ea5c7c339bc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_status.h +++ b/drivers/net/ethernet/intel/iavf/iavf_status.h @@ -18,7 +18,7 @@ enum iavf_status { IAVF_ERR_ADAPTER_STOPPED = -9, IAVF_ERR_INVALID_MAC_ADDR = -10, IAVF_ERR_DEVICE_NOT_SUPPORTED = -11, - IAVF_ERR_MASTER_REQUESTS_PENDING = -12, + 
IAVF_ERR_PRIMARY_REQUESTS_PENDING = -12, IAVF_ERR_INVALID_LINK_SETTINGS = -13, IAVF_ERR_AUTONEG_NOT_COMPLETE = -14, IAVF_ERR_RESET_FAILED = -15, diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h index 1058e68a02b4..82fda6f5abf0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_trace.h +++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h @@ -22,7 +22,7 @@ #include <linux/tracepoint.h> -/** +/* * iavf_trace() macro enables shared code to refer to trace points * like: * diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 7a30d5d5ef53..18b6a702a1d6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; + /* underlying hardware might not allow access and/or always return + * 0 for the head/tail registers so just use the cached values + */ head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? @@ -194,7 +197,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, struct iavf_tx_buffer *tx_buf; struct iavf_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; - unsigned int budget = vsi->work_limit; + unsigned int budget = IAVF_DEFAULT_IRQ_WORK; tx_buf = &tx_ring->tx_bi[i]; tx_desc = IAVF_TX_DESC(tx_ring, i); @@ -374,29 +377,60 @@ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector, return &q_vector->rx == rc; } -static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector) +#define IAVF_AIM_MULTIPLIER_100G 2560 +#define IAVF_AIM_MULTIPLIER_50G 1280 +#define IAVF_AIM_MULTIPLIER_40G 1024 +#define IAVF_AIM_MULTIPLIER_20G 512 +#define IAVF_AIM_MULTIPLIER_10G 256 +#define IAVF_AIM_MULTIPLIER_1G 32 + +static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps) { - unsigned int divisor; + switch (speed_mbps) { + case SPEED_100000: + return IAVF_AIM_MULTIPLIER_100G; + case SPEED_50000: + return IAVF_AIM_MULTIPLIER_50G; + case SPEED_40000: + return IAVF_AIM_MULTIPLIER_40G; + case SPEED_25000: + case SPEED_20000: + return IAVF_AIM_MULTIPLIER_20G; + case SPEED_10000: + default: + return IAVF_AIM_MULTIPLIER_10G; + case SPEED_1000: + case SPEED_100: + return IAVF_AIM_MULTIPLIER_1G; + } +} - switch (q_vector->adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024; - break; - case IAVF_LINK_SPEED_25GB: - case IAVF_LINK_SPEED_20GB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512; - break; +static unsigned int +iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl) +{ + switch (speed_virtchnl) { + case VIRTCHNL_LINK_SPEED_40GB: + return IAVF_AIM_MULTIPLIER_40G; + case VIRTCHNL_LINK_SPEED_25GB: + case VIRTCHNL_LINK_SPEED_20GB: + return IAVF_AIM_MULTIPLIER_20G; + case VIRTCHNL_LINK_SPEED_10GB: default: - case IAVF_LINK_SPEED_10GB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256; - break; - case IAVF_LINK_SPEED_1GB: - case IAVF_LINK_SPEED_100MB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32; - break; + return IAVF_AIM_MULTIPLIER_10G; + case VIRTCHNL_LINK_SPEED_1GB: + case VIRTCHNL_LINK_SPEED_100MB: + return IAVF_AIM_MULTIPLIER_1G; } +} - return divisor; +static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter) +{ + if (ADV_LINK_SUPPORT(adapter)) + return IAVF_ITR_ADAPTIVE_MIN_INC * + iavf_mbps_itr_multiplier(adapter->link_speed_mbps); + else + return IAVF_ITR_ADAPTIVE_MIN_INC * + 
iavf_virtchnl_itr_multiplier(adapter->link_speed); } /** @@ -586,8 +620,9 @@ adjust_by_size: * Use addition as we have already recorded the new latency flag * for the ITR value. */ - itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) * - IAVF_ITR_ADAPTIVE_MIN_INC; + itr += DIV_ROUND_UP(avg_wire_size, + iavf_itr_divisor(q_vector->adapter)) * + IAVF_ITR_ADAPTIVE_MIN_INC; if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { itr &= IAVF_ITR_ADAPTIVE_LATENCY; @@ -865,6 +900,9 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring, if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && + vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); napi_gro_receive(&q_vector->napi, skb); } @@ -1007,7 +1045,7 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi, case IAVF_RX_PTYPE_INNER_PROT_UDP: case IAVF_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; - /* fall though */ + fallthrough; default: break; } @@ -1142,19 +1180,6 @@ static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, } /** - * iavf_page_is_reusable - check if any reuse is possible - * @page: page struct to check - * - * A page is not reusable if it was allocated under low memory - * conditions, or it's not in the same NUMA node as this CPU. - */ -static inline bool iavf_page_is_reusable(struct page *page) -{ - return (page_to_nid(page) == numa_mem_id()) && - !page_is_pfmemalloc(page); -} - -/** * iavf_can_reuse_rx_page - Determine if this page can be reused by * the adapter for another receive * @@ -1187,7 +1212,7 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) struct page *page = rx_buffer->page; /* Is any reuse possible? 
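The ITR rework above replaces the per-q_vector divisor with a speed-keyed multiplier, taken either from link_speed_mbps (when ADV_LINK_SUPPORT is negotiated) or from the legacy virtchnl enum. The Mbps mapping reduces to threshold buckets; a compilable sketch (IAVF_ITR_ADAPTIVE_MIN_INC's real value lives in iavf_txrx.h, the 2 below is a placeholder):

    #include <stdio.h>

    #define ITR_ADAPTIVE_MIN_INC 2 /* placeholder for the iavf_txrx.h value */

    /* multipliers as in the patch; the switch rewritten as thresholds */
    static unsigned int mbps_itr_multiplier(unsigned int speed_mbps)
    {
        if (speed_mbps >= 100000) return 2560;
        if (speed_mbps >= 50000)  return 1280;
        if (speed_mbps >= 40000)  return 1024;
        if (speed_mbps >= 20000)  return 512;
        if (speed_mbps >= 10000)  return 256;
        if (speed_mbps <= 1000)   return 32;
        return 256; /* default bucket, as in the switch's default case */
    }

    int main(void)
    {
        unsigned int speeds[] = { 1000, 10000, 25000, 100000 };

        for (int i = 0; i < 4; i++)
            printf("%6u Mbps -> divisor %u\n", speeds[i],
                   ITR_ADAPTIVE_MIN_INC * mbps_itr_multiplier(speeds[i]));
        return 0;
    }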
*/ - if (unlikely(!iavf_page_is_reusable(page))) + if (!dev_page_is_reusable(page)) return false; #if (PAGE_SIZE < 8192) @@ -1263,11 +1288,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, { struct iavf_rx_buffer *rx_buffer; - if (!size) - return NULL; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); + if (!size) + return rx_buffer; /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, @@ -1309,10 +1333,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif + net_prefetch(va); /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, @@ -1372,16 +1393,14 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, #endif struct sk_buff *skb; - if (!rx_buffer) + if (!rx_buffer || !size) return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif + net_prefetch(va); + /* build an skb around the page buffer */ - skb = build_skb(va - IAVF_SKB_PAD, truesize); + skb = napi_build_skb(va - IAVF_SKB_PAD, truesize); if (unlikely(!skb)) return NULL; @@ -1486,7 +1505,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) struct iavf_rx_buffer *rx_buffer; union iavf_rx_desc *rx_desc; unsigned int size; - u16 vlan_tag; + u16 vlan_tag = 0; u8 rx_ptype; u64 qword; @@ -1532,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - if (rx_buffer) + if (rx_buffer && size) rx_buffer->pagecnt_bias++; break; } @@ -1569,9 +1588,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* populate checksum, VLAN, and protocol */ iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - - vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? - le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) && + rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) + vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1); + if (rx_desc->wb.qword2.ext_status & + cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) && + rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) + vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2); iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); iavf_receive_skb(rx_ring, skb, vlan_tag); @@ -1784,7 +1807,7 @@ tx_only: if (likely(napi_complete_done(napi, work_done))) iavf_update_enable_itr(vsi, q_vector); - return min(work_done, budget - 1); + return min_t(int, work_done, budget - 1); } /** @@ -1799,46 +1822,29 @@ tx_only: * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. 
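The rewritten iavf_tx_prepare_vlan_flags() that follows drops the software-VLAN path entirely: the stack only requests insertion for offloads the driver advertised, so the helper just packs the 16-bit TCI into the upper half of tx_flags and records whether hardware should place it in L2TAG1 or L2TAG2. The packing and later unpacking, using the flag layout from iavf_txrx.h:

    #include <stdint.h>
    #include <stdio.h>

    #define TX_FLAGS_HW_VLAN              (1u << 1)
    #define TX_FLAGS_HW_OUTER_SINGLE_VLAN (1u << 11)
    #define TX_FLAGS_VLAN_MASK            0xffff0000u
    #define TX_FLAGS_VLAN_SHIFT           16

    int main(void)
    {
        uint16_t vlan_tci = 0x2064; /* PCP 1, VID 100 */
        uint32_t tx_flags = 0;

        tx_flags |= (uint32_t)vlan_tci << TX_FLAGS_VLAN_SHIFT;
        tx_flags |= TX_FLAGS_HW_OUTER_SINGLE_VLAN; /* tag goes in L2TAG2 */

        /* later, the xmit path pulls the tag back out for the context desc */
        uint16_t l2tag2 = (tx_flags & TX_FLAGS_VLAN_MASK) >> TX_FLAGS_VLAN_SHIFT;
        printf("tx_flags=0x%08x l2tag2=0x%04x\n", tx_flags, l2tag2);
        return 0;
    }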
**/ -static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, - struct iavf_ring *tx_ring, - u32 *flags) +static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct iavf_ring *tx_ring, u32 *flags) { - __be16 protocol = skb->protocol; u32 tx_flags = 0; - if (protocol == htons(ETH_P_8021Q) && - !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { - /* When HW VLAN acceleration is turned off by the user the - * stack sets the protocol to 8021q so that the driver - * can take any steps required to support the SW only - * VLAN handling. In our case the driver doesn't need - * to take any further steps so just set the protocol - * to the encapsulated ethertype. - */ - skb->protocol = vlan_get_protocol(skb); - goto out; - } - /* if we have a HW VLAN tag being added, default to the HW one */ - if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IAVF_TX_FLAGS_HW_VLAN; - /* else if it is a SW VLAN, check the next protocol and store the tag */ - } else if (protocol == htons(ETH_P_8021Q)) { - struct vlan_hdr *vhdr, _vhdr; - - vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); - if (!vhdr) - return -EINVAL; + /* stack will only request hardware VLAN insertion offload for protocols + * that the driver supports and has enabled + */ + if (!skb_vlan_tag_present(skb)) + return; - protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IAVF_TX_FLAGS_SW_VLAN; + tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; + if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) { + tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN; + } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { + tx_flags |= IAVF_TX_FLAGS_HW_VLAN; + } else { + dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n"); + return; } -out: *flags = tx_flags; - return 0; } /** @@ -1923,13 +1929,20 @@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; - /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); - /* compute length of segmentation header */ - *hdr_len = (l4.tcp->doff * 4) + l4_offset; + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + /* compute length of UDP segmentation header */ + *hdr_len = (u8)sizeof(l4.udp) + l4_offset; + } else { + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + /* compute length of TCP segmentation header */ + *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset); + } /* pull values out of skb_shinfo */ gso_size = skb_shinfo(skb)->gso_size; @@ -2116,7 +2129,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /** - * iavf_create_tx_ctx Build the Tx context descriptor + * iavf_create_tx_ctx - Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 @@ -2451,8 +2464,13 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, first->gso_segs = 1; /* prepare the xmit flags */ - if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) - goto out_drop; + iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags); + if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { + cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 << + 
IAVF_TXD_CTX_QW1_CMD_SHIFT; + cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> + IAVF_TX_FLAGS_VLAN_SHIFT; + } /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index dd3348f9da9d..2624bf6d009e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -243,19 +243,20 @@ static inline unsigned int iavf_txd_use_count(unsigned int size) #define DESC_NEEDED (MAX_SKB_FRAGS + 6) #define IAVF_MIN_DESC_PENDING 4 -#define IAVF_TX_FLAGS_HW_VLAN BIT(1) -#define IAVF_TX_FLAGS_SW_VLAN BIT(2) -#define IAVF_TX_FLAGS_TSO BIT(3) -#define IAVF_TX_FLAGS_IPV4 BIT(4) -#define IAVF_TX_FLAGS_IPV6 BIT(5) -#define IAVF_TX_FLAGS_FCCRC BIT(6) -#define IAVF_TX_FLAGS_FSO BIT(7) -#define IAVF_TX_FLAGS_FD_SB BIT(9) -#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) -#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 -#define IAVF_TX_FLAGS_VLAN_SHIFT 16 +#define IAVF_TX_FLAGS_HW_VLAN BIT(1) +#define IAVF_TX_FLAGS_SW_VLAN BIT(2) +#define IAVF_TX_FLAGS_TSO BIT(3) +#define IAVF_TX_FLAGS_IPV4 BIT(4) +#define IAVF_TX_FLAGS_IPV6 BIT(5) +#define IAVF_TX_FLAGS_FCCRC BIT(6) +#define IAVF_TX_FLAGS_FSO BIT(7) +#define IAVF_TX_FLAGS_FD_SB BIT(9) +#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(11) +#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IAVF_TX_FLAGS_VLAN_SHIFT 16 struct iavf_tx_buffer { struct iavf_tx_desc *next_to_watch; @@ -362,6 +363,9 @@ struct iavf_ring { u16 flags; #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) #define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) +#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3) +#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4) +#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5) /* stats structs */ struct iavf_queue_stats stats; @@ -454,7 +458,6 @@ bool __iavf_chk_linearize(struct sk_buff *skb); /** * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed * @skb: send buffer - * @tx_ring: ring to send buffer on * * Returns number of data descriptors needed for this skb. 
Returns 0 to indicate * there is not enough descriptors available in this ring since we need at least @@ -514,6 +517,7 @@ static inline bool iavf_chk_linearize(struct sk_buff *skb, int count) return count != IAVF_MAX_BUFFER_TXD; } /** + * txring_txq - helper to convert from a ring to a queue * @ring: Tx ring to find the netdev equivalent of **/ static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring) diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index 7190a40c540c..9f1f523807c4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -192,14 +192,6 @@ struct iavf_hw { char err_str[16]; }; -struct iavf_driver_version { - u8 major_version; - u8 minor_version; - u8 build_version; - u8 subbuild_version; - u8 driver_string[32]; -}; - /* RX Descriptors */ union iavf_16byte_rx_desc { struct { @@ -378,7 +370,6 @@ enum iavf_rx_l2_ptype { }; struct iavf_rx_ptype_decoded { - u32 ptype:8; u32 known:1; u32 outer_ip:1; u32 outer_ip_ver:1; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 1ab9cb339acb..24a701fd140e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -5,10 +5,6 @@ #include "iavf_prototype.h" #include "iavf_client.h" -/* busy wait delay in msec */ -#define IAVF_BUSY_WAIT_DELAY 10 -#define IAVF_BUSY_WAIT_COUNT 50 - /** * iavf_send_pf_msg * @adapter: adapter structure @@ -22,17 +18,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) { struct iavf_hw *hw = &adapter->hw; - enum iavf_status err; + enum iavf_status status; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ - err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); - if (err) - dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", - op, iavf_stat_str(hw, err), + status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + if (status) + dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n", + op, iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); - return err; + return iavf_status_to_errno(status); } /** @@ -55,6 +51,41 @@ int iavf_send_api_ver(struct iavf_adapter *adapter) } /** + * iavf_poll_virtchnl_msg + * @hw: HW configuration structure + * @event: event to populate on success + * @op_to_poll: requested virtchnl op to poll for + * + * Initialize poll for virtchnl msg matching the requested_op. Returns 0 + * if a message of the correct opcode is in the queue or an error code + * if no message matching the op code is waiting and other failures. + */ +static int +iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event, + enum virtchnl_ops op_to_poll) +{ + enum virtchnl_ops received_op; + enum iavf_status status; + u32 v_retval; + + while (1) { + /* When the AQ is empty, iavf_clean_arq_element will return + * nonzero and this loop will terminate. 
+ */ + status = iavf_clean_arq_element(hw, event, NULL); + if (status != IAVF_SUCCESS) + return iavf_status_to_errno(status); + received_op = + (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high); + if (op_to_poll == received_op) + break; + } + + v_retval = le32_to_cpu(event->desc.cookie_low); + return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval); +} + +/** * iavf_verify_api_ver * @adapter: adapter structure * @@ -65,55 +96,28 @@ int iavf_send_api_ver(struct iavf_adapter *adapter) **/ int iavf_verify_api_ver(struct iavf_adapter *adapter) { - struct virtchnl_version_info *pf_vvi; - struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event; - enum virtchnl_ops op; - enum iavf_status err; + int err; event.buf_len = IAVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) { - err = -ENOMEM; - goto out; - } - - while (1) { - err = iavf_clean_arq_element(hw, &event, NULL); - /* When the AQ is empty, iavf_clean_arq_element will return - * nonzero and this loop will terminate. - */ - if (err) - goto out_alloc; - op = - (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == VIRTCHNL_OP_VERSION) - break; - } - + event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; - err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); - if (err) - goto out_alloc; + err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION); + if (!err) { + struct virtchnl_version_info *pf_vvi = + (struct virtchnl_version_info *)event.msg_buf; + adapter->pf_version = *pf_vvi; - if (op != VIRTCHNL_OP_VERSION) { - dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", - op); - err = -EIO; - goto out_alloc; + if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR || + (pf_vvi->major == VIRTCHNL_VERSION_MAJOR && + pf_vvi->minor > VIRTCHNL_VERSION_MINOR)) + err = -EIO; } - pf_vvi = (struct virtchnl_version_info *)event.msg_buf; - adapter->pf_version = *pf_vvi; - - if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || - ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && - (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) - err = -EIO; - -out_alloc: kfree(event.msg_buf); -out: + return err; } @@ -137,9 +141,14 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | + VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | - VIRTCHNL_VF_OFFLOAD_ADQ; + VIRTCHNL_VF_OFFLOAD_ADQ | + VIRTCHNL_VF_OFFLOAD_USO | + VIRTCHNL_VF_OFFLOAD_FDIR_PF | + VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | + VIRTCHNL_VF_CAP_ADV_LINK_SPEED; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; @@ -151,6 +160,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) NULL, 0); } +int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) +{ + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; + + if (!VLAN_V2_ALLOWED(adapter)) + return -EOPNOTSUPP; + + adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; + + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, + NULL, 0); +} + /** * iavf_validate_num_queues * @adapter: adapter structure @@ -190,33 +212,17 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event; - enum virtchnl_ops op; - enum iavf_status err; u16 len; + int err; - len = sizeof(struct virtchnl_vf_resource) + + len = sizeof(struct 
virtchnl_vf_resource) + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); event.buf_len = len; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) { - err = -ENOMEM; - goto out; - } - - while (1) { - /* When the AQ is empty, iavf_clean_arq_element will return - * nonzero and this loop will terminate. - */ - err = iavf_clean_arq_element(hw, &event, NULL); - if (err) - goto out_alloc; - op = - (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == VIRTCHNL_OP_GET_VF_RESOURCES) - break; - } + event.msg_buf = kzalloc(len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; - err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); + err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); /* some PFs send more queues than we should have so validate that @@ -225,9 +231,32 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) if (!err) iavf_validate_num_queues(adapter); iavf_vf_parse_hw_config(hw, adapter->vf_res); -out_alloc: + + kfree(event.msg_buf); + + return err; +} + +int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) +{ + struct iavf_arq_event_info event; + int err; + u16 len; + + len = sizeof(struct virtchnl_vlan_caps); + event.buf_len = len; + event.msg_buf = kzalloc(len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + err = iavf_poll_virtchnl_msg(&adapter->hw, &event, + VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS); + if (!err) + memcpy(&adapter->vlan_v2_caps, event.msg_buf, + min(event.msg_len, len)); + kfree(event.msg_buf); -out: + return err; } @@ -240,11 +269,14 @@ out: void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; - struct virtchnl_queue_pair_info *vqpi; + int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; - int i, max_frame = IAVF_MAX_RXBUFFER; + struct virtchnl_queue_pair_info *vqpi; size_t len; + if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) + max_frame = IAVF_MAX_RXBUFFER; + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", @@ -397,30 +429,17 @@ void iavf_map_queues(struct iavf_adapter *adapter) } /** - * iavf_request_queues - * @adapter: adapter structure - * @num: number of requested queues - * - * We get a default number of queues from the PF. This enables us to request a - * different number. Returns 0 on success, negative on failure + * iavf_set_mac_addr_type - Set the correct request type from the filter type + * @virtchnl_ether_addr: pointer to requested list element + * @filter: pointer to requested filter **/ -int iavf_request_queues(struct iavf_adapter *adapter, int num) +static void +iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr, + const struct iavf_mac_filter *filter) { - struct virtchnl_vf_res_request vfres; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n", - adapter->current_op); - return -EBUSY; - } - - vfres.num_queue_pairs = min_t(int, num, num_online_cpus()); - - adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; - return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, - (u8 *)&vfres, sizeof(vfres)); + virtchnl_ether_addr->type = filter->is_primary ? 
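The VLAN_V2 capability exchange introduced above is split into a fire-and-forget request (iavf_send_vf_offload_vlan_v2_msg()) and a blocking poll for the reply (iavf_get_vf_vlan_v2_caps()). A minimal sketch of how a caller would chain the two; the combined wrapper itself is hypothetical, the real sequencing lives in the driver's init state machine:

        /* Hypothetical wrapper: request VLAN_V2 caps, then poll for the reply */
        static int example_negotiate_vlan_v2(struct iavf_adapter *adapter)
        {
                int err;

                err = iavf_send_vf_offload_vlan_v2_msg(adapter);
                if (err)
                        return err; /* -EOPNOTSUPP when VLAN_V2 was not negotiated */

                return iavf_get_vf_vlan_v2_caps(adapter);
        }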
+ VIRTCHNL_ETHER_ADDR_PRIMARY : + VIRTCHNL_ETHER_ADDR_EXTRA; } /** @@ -478,6 +497,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter) list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add) { ether_addr_copy(veal->list[i].addr, f->macaddr); + iavf_set_mac_addr_type(&veal->list[i], f); i++; f->add = false; if (i == count) @@ -547,6 +567,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { if (f->remove) { ether_addr_copy(veal->list[i].addr, f->macaddr); + iavf_set_mac_addr_type(&veal->list[i], f); i++; list_del(&f->list); kfree(f); @@ -564,6 +585,79 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) } /** + * iavf_mac_add_ok + * @adapter: adapter structure + * + * Submit list of filters based on PF response. + **/ +static void iavf_mac_add_ok(struct iavf_adapter *adapter) +{ + struct iavf_mac_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + f->is_new_mac = false; + if (!f->add && !f->add_handled) + f->add_handled = true; + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * iavf_mac_add_reject + * @adapter: adapter structure + * + * Remove filters from list based on PF response. + **/ +static void iavf_mac_add_reject(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct iavf_mac_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) + f->remove = false; + + if (!f->add && !f->add_handled) + f->add_handled = true; + + if (f->is_new_mac) { + list_del(&f->list); + kfree(f); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * iavf_vlan_add_reject + * @adapter: adapter structure + * + * Remove VLAN filters from list based on PF response. 
+ **/ +static void iavf_vlan_add_reject(struct iavf_adapter *adapter) +{ + struct iavf_vlan_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->is_new_vlan) { + if (f->vlan.tpid == ETH_P_8021Q) + clear_bit(f->vlan.vid, + adapter->vsi.active_cvlans); + else + clear_bit(f->vlan.vid, + adapter->vsi.active_svlans); + + list_del(&f->list); + kfree(f); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** * iavf_add_vlans * @adapter: adapter structure * @@ -571,7 +665,6 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) **/ void iavf_add_vlans(struct iavf_adapter *adapter) { - struct virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; struct iavf_vlan_filter *f; bool more = false; @@ -589,48 +682,116 @@ void iavf_add_vlans(struct iavf_adapter *adapter) if (f->add) count++; } - if (!count) { + if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } - adapter->current_op = VIRTCHNL_OP_ADD_VLAN; - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { + if (VLAN_ALLOWED(adapter)) { + struct virtchnl_vlan_filter_list *vvfl; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(*vvfl) + (count * sizeof(u16)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / + sizeof(u16); + len = sizeof(*vvfl) + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vvfl->vlan_id[i] = f->vlan.vid; + i++; + f->add = false; + f->is_new_vlan = true; + if (i == count) + break; + } + } + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->add) { - vvfl->vlan_id[i] = f->vlan; - i++; - f->add = false; - if (i == count) - break; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + } else { + u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters; + u16 current_vlans = iavf_get_num_vlans_added(adapter); + struct virtchnl_vlan_filter_list_v2 *vvfl_v2; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2; + + if ((count + current_vlans) > max_vlans && + current_vlans < max_vlans) { + count = max_vlans - iavf_get_num_vlans_added(adapter); + more = true; } - } - if (!more) - adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); + len = sizeof(*vvfl_v2) + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - 
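iavf_vlan_add_reject() above and the VIRTCHNL_OP_ADD_VLAN_V2 completion handler later in this patch keep two per-VSI bitmaps in sync, keyed by the filter's TPID. A condensed sketch of that bookkeeping, grounded in the set_bit()/clear_bit() calls in this diff (the helper name is illustrative):

        #include <linux/if_vlan.h>

        /* Illustrative only: pick the 802.1Q (CTAG) or 802.1ad (STAG) bitmap
         * by TPID, mirroring how this patch tracks active VLAN IDs.
         */
        static void example_track_vid(struct iavf_vsi *vsi, u16 tpid, u16 vid,
                                      bool set)
        {
                unsigned long *bmap = tpid == ETH_P_8021Q ?
                                      vsi->active_cvlans : vsi->active_svlans;

                if (set)
                        set_bit(vid, bmap);
                else
                        clear_bit(vid, bmap);
        }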
sizeof(*vvfl_v2)) / + sizeof(struct virtchnl_vlan_filter); + len = sizeof(*vvfl_v2) + + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + more = true; + } + + vvfl_v2 = kzalloc(len, GFP_ATOMIC); + if (!vvfl_v2) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl_v2->vport_id = adapter->vsi_res->vsi_id; + vvfl_v2->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + struct virtchnl_vlan_supported_caps *filtering_support = + &adapter->vlan_v2_caps.filtering.filtering_support; + struct virtchnl_vlan *vlan; + + if (i == count) + break; + + /* give priority over outer if it's enabled */ + if (filtering_support->outer) + vlan = &vvfl_v2->filters[i].outer; + else + vlan = &vvfl_v2->filters[i].inner; + + vlan->tci = f->vlan.vid; + vlan->tpid = f->vlan.tpid; + + i++; + f->add = false; + f->is_new_vlan = true; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); - iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); - kfree(vvfl); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2, + (u8 *)vvfl_v2, len); + kfree(vvfl_v2); + } } /** @@ -641,7 +802,6 @@ void iavf_add_vlans(struct iavf_adapter *adapter) **/ void iavf_del_vlans(struct iavf_adapter *adapter) { - struct virtchnl_vlan_filter_list *vvfl; struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -655,53 +815,123 @@ void iavf_del_vlans(struct iavf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->remove) + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we dont want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) { + list_del(&f->list); + kfree(f); + } else if (f->remove) { count++; + } } - if (!count) { + if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } - adapter->current_op = VIRTCHNL_OP_DEL_VLAN; - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { + if (VLAN_ALLOWED(adapter)) { + struct virtchnl_vlan_filter_list *vvfl; + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + + len = sizeof(*vvfl) + (count * sizeof(u16)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / + sizeof(u16); + len = sizeof(*vvfl) + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vvfl->vlan_id[i] = f->vlan.vid; + i++; + 
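Both VLAN_V2 paths (add above, delete below) size their message as sizeof(*vvfl_v2) plus (count - 1) extra elements. That works because struct virtchnl_vlan_filter_list_v2 ends in a one-element filters[] array, so the first entry is already included in sizeof(*vvfl_v2). A short sketch of the arithmetic, assuming that one-element-array layout:

        /* One filter is part of sizeof(*vvfl_v2) itself (the filters[1]
         * member), so only the remaining count - 1 entries extend the
         * message length.
         */
        static size_t example_vvfl_v2_len(u16 count)
        {
                return sizeof(struct virtchnl_vlan_filter_list_v2) +
                       (count - 1) * sizeof(struct virtchnl_vlan_filter);
        }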
list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - if (f->remove) { - vvfl->vlan_id[i] = f->vlan; - i++; - list_del(&f->list); - kfree(f); - if (i == count) - break; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + } else { + struct virtchnl_vlan_filter_list_v2 *vvfl_v2; + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; + + len = sizeof(*vvfl_v2) + + ((count - 1) * sizeof(struct virtchnl_vlan_filter)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - + sizeof(*vvfl_v2)) / + sizeof(struct virtchnl_vlan_filter); + len = sizeof(*vvfl_v2) + + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + more = true; } - } - if (!more) - adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); + vvfl_v2 = kzalloc(len, GFP_ATOMIC); + if (!vvfl_v2) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl_v2->vport_id = adapter->vsi_res->vsi_id; + vvfl_v2->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + struct virtchnl_vlan_supported_caps *filtering_support = + &adapter->vlan_v2_caps.filtering.filtering_support; + struct virtchnl_vlan *vlan; + + /* give priority over outer if it's enabled */ + if (filtering_support->outer) + vlan = &vvfl_v2->filters[i].outer; + else + vlan = &vvfl_v2->filters[i].inner; + + vlan->tci = f->vlan.vid; + vlan->tpid = f->vlan.tpid; + + list_del(&f->list); + kfree(f); + i++; + if (i == count) + break; + } + } - iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); - kfree(vvfl); + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2, + (u8 *)vvfl_v2, len); + kfree(vvfl_v2); + } } /** @@ -734,15 +964,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) if (flags & FLAG_VF_MULTICAST_PROMISC) { adapter->flags |= IAVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", + adapter->netdev->name); } if (!flags) { - adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | - IAVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | - IAVF_FLAG_AQ_RELEASE_ALLMULTI); - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + if (adapter->flags & IAVF_FLAG_PROMISC_ON) { + adapter->flags &= ~IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + + if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { + adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; + dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", + adapter->netdev->name); + } } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; @@ -766,6 +1004,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) /* no error message, this isn't crucial */ return; } + + adapter->aq_required 
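The reworked iavf_set_promiscuous() above tears down unicast and multicast promiscuous state independently instead of clearing both flags unconditionally. A minimal sketch of how a caller would select the request flags; the watchdog-style caller shown here is an assumption, and FLAG_VF_UNICAST_PROMISC comes from virtchnl rather than from this hunk:

        /* Assumed caller: request exactly the promiscuous state that is due */
        int flags = 0;

        if (adapter->flags & IAVF_FLAG_PROMISC_ON)
                flags |= FLAG_VF_UNICAST_PROMISC;
        if (adapter->flags & IAVF_FLAG_ALLMULTI_ON)
                flags |= FLAG_VF_MULTICAST_PROMISC;
        iavf_set_promiscuous(adapter, flags);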
&= ~IAVF_FLAG_AQ_REQUEST_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ @@ -919,6 +1159,206 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) } /** + * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype + * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) + */ +static u32 iavf_tpid_to_vc_ethertype(u16 tpid) +{ + switch (tpid) { + case ETH_P_8021Q: + return VIRTCHNL_VLAN_ETHERTYPE_8100; + case ETH_P_8021AD: + return VIRTCHNL_VLAN_ETHERTYPE_88A8; + } + + return 0; +} + +/** + * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message + * @adapter: adapter structure + * @msg: message structure used for updating offloads over virtchnl to update + * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) + * @offload_op: opcode used to determine which support structure to check + */ +static int +iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, + struct virtchnl_vlan_setting *msg, u16 tpid, + enum virtchnl_ops offload_op) +{ + struct virtchnl_vlan_supported_caps *offload_support; + u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); + + /* reference the correct offload support structure */ + switch (offload_op) { + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + offload_support = + &adapter->vlan_v2_caps.offloads.stripping_support; + break; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + offload_support = + &adapter->vlan_v2_caps.offloads.insertion_support; + break; + default: + dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", + offload_op); + return -EINVAL; + } + + /* make sure ethertype is supported */ + if (offload_support->outer & vc_ethertype && + offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { + msg->outer_ethertype_setting = vc_ethertype; + } else if (offload_support->inner & vc_ethertype && + offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { + msg->inner_ethertype_setting = vc_ethertype; + } else { + dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", + offload_op, tpid); + return -EINVAL; + } + + return 0; +} + +/** + * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request + * @adapter: adapter structure + * @tpid: VLAN TPID + * @offload_op: opcode used to determine which AQ required bit to clear + */ +static void +iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, + enum virtchnl_ops offload_op) +{ + switch (offload_op) { + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; + break; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; + break; + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + 
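iavf_tpid_to_vc_ethertype() deliberately returns 0 for anything other than the two supported TPIDs, which iavf_set_vc_offload_ethertype() then rejects because no support bit can match. A few worked calls (ETH_P_IP here is just an arbitrary unsupported value):

        u32 ctag = iavf_tpid_to_vc_ethertype(ETH_P_8021Q);  /* VIRTCHNL_VLAN_ETHERTYPE_8100 */
        u32 stag = iavf_tpid_to_vc_ethertype(ETH_P_8021AD); /* VIRTCHNL_VLAN_ETHERTYPE_88A8 */
        u32 none = iavf_tpid_to_vc_ethertype(ETH_P_IP);     /* 0: request is never sent */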
~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; + break; + default: + dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", + offload_op); + } +} + +/** + * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl + * @adapter: adapter structure + * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8) + * @offload_op: offload_op used to make the request over virtchnl + */ +static void +iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, + enum virtchnl_ops offload_op) +{ + struct virtchnl_vlan_setting *msg; + int len = sizeof(*msg); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", + offload_op, adapter->current_op); + return; + } + + adapter->current_op = offload_op; + + msg = kzalloc(len, GFP_KERNEL); + if (!msg) + return; + + msg->vport_id = adapter->vsi_res->vsi_id; + + /* always clear to prevent unsupported and endless requests */ + iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); + + /* only send valid offload requests */ + if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) + iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); + else + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(msg); +} + +/** + * iavf_enable_vlan_stripping_v2 - enable VLAN stripping + * @adapter: adapter structure + * @tpid: VLAN TPID used to enable VLAN stripping + */ +void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); +} + +/** + * iavf_disable_vlan_stripping_v2 - disable VLAN stripping + * @adapter: adapter structure + * @tpid: VLAN TPID used to disable VLAN stripping + */ +void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); +} + +/** + * iavf_enable_vlan_insertion_v2 - enable VLAN insertion + * @adapter: adapter structure + * @tpid: VLAN TPID used to enable VLAN insertion + */ +void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); +} + +/** + * iavf_disable_vlan_insertion_v2 - disable VLAN insertion + * @adapter: adapter structure + * @tpid: VLAN TPID used to disable VLAN insertion + */ +void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); +} + +#define IAVF_MAX_SPEED_STRLEN 13 + +/** * iavf_print_link_message - print link up or down * @adapter: adapter structure * @@ -927,41 +1367,109 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - char *speed = "Unknown "; + int link_speed_mbps; + char *speed; if (!adapter->link_up) { netdev_info(netdev, "NIC Link is Down\n"); return; } + speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); + if (!speed) + return; + + if (ADV_LINK_SUPPORT(adapter)) { + link_speed_mbps = adapter->link_speed_mbps; + goto print_link_msg; + } + switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: - speed = "40 G"; + case 
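The four thin wrappers above give the rest of the driver one entry point per offload/direction pair. A sketch of the expected usage from a feature-update path; the surrounding handler is hypothetical, while NETIF_F_HW_VLAN_CTAG_RX is the stripping feature bit this patch also syncs elsewhere:

        /* Hypothetical feature handler: translate a CTAG-stripping toggle
         * into the matching VLAN_V2 virtchnl request.
         */
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
        else
                iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);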
VIRTCHNL_LINK_SPEED_40GB: + link_speed_mbps = SPEED_40000; + break; + case VIRTCHNL_LINK_SPEED_25GB: + link_speed_mbps = SPEED_25000; + break; + case VIRTCHNL_LINK_SPEED_20GB: + link_speed_mbps = SPEED_20000; break; - case IAVF_LINK_SPEED_25GB: - speed = "25 G"; + case VIRTCHNL_LINK_SPEED_10GB: + link_speed_mbps = SPEED_10000; break; - case IAVF_LINK_SPEED_20GB: - speed = "20 G"; + case VIRTCHNL_LINK_SPEED_5GB: + link_speed_mbps = SPEED_5000; break; - case IAVF_LINK_SPEED_10GB: - speed = "10 G"; + case VIRTCHNL_LINK_SPEED_2_5GB: + link_speed_mbps = SPEED_2500; break; - case IAVF_LINK_SPEED_1GB: - speed = "1000 M"; + case VIRTCHNL_LINK_SPEED_1GB: + link_speed_mbps = SPEED_1000; break; - case IAVF_LINK_SPEED_100MB: - speed = "100 M"; + case VIRTCHNL_LINK_SPEED_100MB: + link_speed_mbps = SPEED_100; break; default: + link_speed_mbps = SPEED_UNKNOWN; break; } - netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); +print_link_msg: + if (link_speed_mbps > SPEED_1000) { + if (link_speed_mbps == SPEED_2500) + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); + else + /* convert to Gbps inline */ + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", + link_speed_mbps / 1000, "Gbps"); + } else if (link_speed_mbps == SPEED_UNKNOWN) { + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); + } else { + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", + link_speed_mbps, "Mbps"); + } + + netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); + kfree(speed); +} + +/** + * iavf_get_vpe_link_status + * @adapter: adapter structure + * @vpe: virtchnl_pf_event structure + * + * Helper function for determining the link status + **/ +static bool +iavf_get_vpe_link_status(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + return vpe->event_data.link_event_adv.link_status; + else + return vpe->event_data.link_event.link_status; +} + +/** + * iavf_set_adapter_link_speed_from_vpe + * @adapter: adapter structure for which we are setting the link speed + * @vpe: virtchnl_pf_event structure that contains the link speed we are setting + * + * Helper function for setting iavf_adapter link speed + **/ +static void +iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + adapter->link_speed_mbps = + vpe->event_data.link_event_adv.link_speed; + else + adapter->link_speed = vpe->event_data.link_event.link_speed; } /** - * iavf_enable_channel + * iavf_enable_channels * @adapter: adapter structure * * Request that the PF enable channels as specified by @@ -1002,7 +1510,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter) } /** - * iavf_disable_channel + * iavf_disable_channels * @adapter: adapter structure * * Request that the PF disable channels that are configured @@ -1154,16 +1662,228 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter) } /** + * iavf_add_fdir_filter + * @adapter: the VF adapter structure + * + * Request that the PF add Flow Director filters as specified + * by the user via ethtool. 
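With ADV_LINK_SUPPORT the PF reports the speed directly in Mbps; otherwise the legacy enum is mapped first. Either way the same formatting runs at the end. Worked examples of the snprintf() results above (buffer size IAVF_MAX_SPEED_STRLEN = 13 bytes; SPEED_UNKNOWN is -1 in ethtool terms):

        /*  link_speed_mbps    printed string
         *  ---------------    --------------
         *  40000              "40 Gbps"
         *   2500              "2.5 Gbps"
         *    100              "100 Mbps"
         *  SPEED_UNKNOWN      "Unknown Mbps"
         */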
+ **/ +void iavf_add_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_add *f; + bool process_fltr = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_add); + f = kzalloc(len, GFP_KERNEL); + if (!f) + return; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { + process_fltr = true; + fdir->state = IAVF_FDIR_FLTR_ADD_PENDING; + memcpy(f, &fdir->vc_add_msg, len); + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + /* prevent iavf_add_fdir_filter() from being called when there + * are no filters to add + */ + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER; + kfree(f); + return; + } + adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len); + kfree(f); +} + +/** + * iavf_del_fdir_filter + * @adapter: the VF adapter structure + * + * Request that the PF delete Flow Director filters as specified + * by the user via ethtool. + **/ +void iavf_del_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_del f; + bool process_fltr = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_del); + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { + process_fltr = true; + memset(&f, 0, len); + f.vsi_id = fdir->vc_add_msg.vsi_id; + f.flow_id = fdir->flow_id; + fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER; + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len); +} + +/** + * iavf_add_adv_rss_cfg + * @adapter: the VF adapter structure + * + * Request that the PF add RSS configuration as specified + * by the user via ethtool. 
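iavf_add_fdir_filter() and iavf_del_fdir_filter() above only ship the virtchnl message; each filter's lifecycle is completed by the VIRTCHNL_OP_ADD_FDIR_FILTER / VIRTCHNL_OP_DEL_FDIR_FILTER handlers in iavf_virtchnl_completion() later in this file. The resulting state flow, condensed from the states used in this diff:

        /* IAVF_FDIR_FLTR_ADD_REQUEST -> ADD_PENDING -> ACTIVE   (PF accepted)
         *                                           -> freed    (PF rejected)
         * IAVF_FDIR_FLTR_DEL_REQUEST -> DEL_PENDING -> freed    (PF deleted)
         *                                           -> ACTIVE   (delete failed)
         */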
+ **/ +void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_cfg *rss_cfg; + struct iavf_adv_rss *rss; + bool process_rss = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_rss_cfg); + rss_cfg = kzalloc(len, GFP_KERNEL); + if (!rss_cfg) + return; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { + process_rss = true; + rss->state = IAVF_ADV_RSS_ADD_PENDING; + memcpy(rss_cfg, &rss->cfg_msg, len); + iavf_print_adv_rss_cfg(adapter, rss, + "Input set change for", + "is pending"); + break; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (process_rss) { + adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG, + (u8 *)rss_cfg, len); + } else { + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; + } + + kfree(rss_cfg); +} + +/** + * iavf_del_adv_rss_cfg + * @adapter: the VF adapter structure + * + * Request that the PF delete RSS configuration as specified + * by the user via ethtool. + **/ +void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_cfg *rss_cfg; + struct iavf_adv_rss *rss; + bool process_rss = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_rss_cfg); + rss_cfg = kzalloc(len, GFP_KERNEL); + if (!rss_cfg) + return; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) { + process_rss = true; + rss->state = IAVF_ADV_RSS_DEL_PENDING; + memcpy(rss_cfg, &rss->cfg_msg, len); + break; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (process_rss) { + adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG, + (u8 *)rss_cfg, len); + } else { + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; + } + + kfree(rss_cfg); +} + +/** * iavf_request_reset * @adapter: adapter structure * * Request that the PF reset this VF. No response is expected. **/ -void iavf_request_reset(struct iavf_adapter *adapter) +int iavf_request_reset(struct iavf_adapter *adapter) { + int err; /* Don't check CURRENT_OP - this is always higher priority */ - iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); adapter->current_op = VIRTCHNL_OP_UNKNOWN; + return err; +} + +/** + * iavf_netdev_features_vlan_strip_set - update vlan strip status + * @netdev: ptr to netdev being adjusted + * @enable: enable or disable vlan strip + * + * Helper function to change vlan strip status in netdev->features. 
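iavf_request_reset() below now propagates the send result instead of returning void, so reset paths can react to a dead admin queue. A sketch of a caller using the new return value; the surrounding context is assumed, not part of this diff:

        /* Assumed caller: a failed reset request is worth logging */
        int err;

        err = iavf_request_reset(adapter);
        if (err)
                dev_err(&adapter->pdev->dev,
                        "failed to request VF reset from PF, err %d\n", err);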
+ */ +static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, + const bool enable) +{ + if (enable) + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; } /** @@ -1187,12 +1907,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; - bool link_up = vpe->event_data.link_event.link_status; + bool link_up = iavf_get_vpe_link_status(adapter, vpe); switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: - adapter->link_speed = - vpe->event_data.link_event.link_speed; + iavf_set_adapter_link_speed_from_vpe(adapter, vpe); /* we've already got the right link status, bail */ if (adapter->link_up == link_up) @@ -1229,7 +1948,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: - dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); @@ -1252,8 +1971,10 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); + iavf_mac_add_reject(adapter); /* restore administratively set MAC address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + wake_up(&adapter->vc_waitqueue); break; case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", @@ -1314,6 +2035,99 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: { + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { + dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_fdir_fltr(adapter, fdir); + if (msglen) + dev_err(&adapter->pdev->dev, + "%s\n", msg); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: { + struct iavf_fdir_fltr *fdir; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_fdir_fltr(adapter, fdir); + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_ADD_RSS_CFG: { + struct iavf_adv_rss *rss, *rss_tmp; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rss_tmp, + &adapter->adv_rss_list_head, + list) { + if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { + iavf_print_adv_rss_cfg(adapter, rss, + "Failed to change the input set for", + NULL); + list_del(&rss->list); + kfree(rss); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case VIRTCHNL_OP_DEL_RSS_CFG: { + struct iavf_adv_rss *rss; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, + 
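The wake_up(&adapter->vc_waitqueue) calls in the VIRTCHNL_OP_ADD_ETH_ADDR paths pair with a waiter on the ndo_set_mac_address() side: the request marks the filter, and the waiter sleeps until the PF verdict sets add_handled. A minimal sketch of that waiter; the helper name iavf_is_mac_set_handled() and the timeout value are assumptions about the iavf_main.c side, which is outside this hunk:

        /* Assumed waiter (inside the ndo_set_mac_address() implementation):
         * block until the PF has accepted or rejected the new MAC, as
         * signalled through adapter->vc_waitqueue.
         */
        int ret;

        ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
                                               iavf_is_mac_set_handled(netdev,
                                                                       addr->sa_data),
                                               msecs_to_jiffies(2500));
        if (!ret)
                return -EAGAIN; /* timed out waiting for the PF verdict */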
list) { + if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { + rss->state = IAVF_ADV_RSS_ACTIVE; + dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be enabled by ethtool. + * Disable it in netdev->features. + */ + iavf_netdev_features_vlan_strip_set(netdev, false); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be disabled by ethtool. + * Enable it in netdev->features. + */ + iavf_netdev_features_vlan_strip_set(netdev, true); + break; + case VIRTCHNL_OP_ADD_VLAN_V2: + iavf_vlan_add_reject(adapter); + dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), @@ -1321,10 +2135,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } switch (v_opcode) { - case VIRTCHNL_OP_ADD_ETH_ADDR: { + case VIRTCHNL_OP_ADD_ETH_ADDR: + if (!v_retval) + iavf_mac_add_ok(adapter); if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - } + if (!ether_addr_equal(netdev->dev_addr, + adapter->hw.mac.addr)) { + netif_addr_lock_bh(netdev); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); + netif_addr_unlock_bh(netdev); + } + wake_up(&adapter->vc_waitqueue); break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = @@ -1354,15 +2175,97 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, /* restore current mac address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { + netif_addr_lock_bh(netdev); /* refresh current mac address if changed */ - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + netif_addr_unlock_bh(netdev); } spin_lock_bh(&adapter->mac_vlan_list_lock); iavf_add_filter(adapter, adapter->hw.mac.addr); + + if (VLAN_ALLOWED(adapter)) { + if (!list_empty(&adapter->vlan_filter_list)) { + struct iavf_vlan_filter *vlf; + + /* re-add all VLAN filters over virtchnl */ + list_for_each_entry(vlf, + &adapter->vlan_filter_list, + list) + vlf->add = true; + + adapter->aq_required |= + IAVF_FLAG_AQ_ADD_VLAN_FILTER; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + iavf_parse_vf_resource_msg(adapter); + + /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the + * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish + * configuration + */ + if (VLAN_V2_ALLOWED(adapter)) + break; + /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * wasn't successfully negotiated with the PF + */ + } + fallthrough; + case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { + struct iavf_mac_filter *f; + bool was_mac_changed; + u64 aq_required = 0; + + if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) + memcpy(&adapter->vlan_v2_caps, msg, + min_t(u16, msglen, + sizeof(adapter->vlan_v2_caps))); + iavf_process_config(adapter); + adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; + was_mac_changed = !ether_addr_equal(netdev->dev_addr, + adapter->hw.mac.addr); + + 
spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (was_mac_changed && + ether_addr_equal(netdev->dev_addr, f->macaddr)) + ether_addr_copy(f->macaddr, + adapter->hw.mac.addr); + + f->is_new_mac = true; + f->add = true; + f->add_handled = false; + f->remove = false; + } + + /* re-add all VLAN filters */ + if (VLAN_FILTERING_ALLOWED(adapter)) { + struct iavf_vlan_filter *vlf; + + if (!list_empty(&adapter->vlan_filter_list)) { + list_for_each_entry(vlf, + &adapter->vlan_filter_list, + list) + vlf->add = true; + + aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; + } + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + netif_addr_lock_bh(netdev); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); + netif_addr_unlock_bh(netdev); + + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER | + aq_required; } break; case VIRTCHNL_OP_ENABLE_QUEUES: @@ -1374,7 +2277,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); if (adapter->state == __IAVF_DOWN_PENDING) { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } break; @@ -1447,6 +2350,121 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: { + struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { + if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", + fdir->loc); + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + fdir->flow_id = add_fltr->flow_id; + } else { + dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", + add_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: { + struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", + fdir->loc); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", + del_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + } + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_ADD_RSS_CFG: { + struct iavf_adv_rss *rss; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { + iavf_print_adv_rss_cfg(adapter, rss, + "Input set change for", + "successful"); + rss->state = IAVF_ADV_RSS_ACTIVE; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case VIRTCHNL_OP_DEL_RSS_CFG: { + struct iavf_adv_rss *rss, *rss_tmp; + + spin_lock_bh(&adapter->adv_rss_lock); + 
list_for_each_entry_safe(rss, rss_tmp, + &adapter->adv_rss_list_head, list) { + if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { + list_del(&rss->list); + kfree(rss); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case VIRTCHNL_OP_ADD_VLAN_V2: { + struct iavf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->is_new_vlan) { + f->is_new_vlan = false; + if (!f->vlan.vid) + continue; + if (f->vlan.tpid == ETH_P_8021Q) + set_bit(f->vlan.vid, + adapter->vsi.active_cvlans); + else + set_bit(f->vlan.vid, + adapter->vsi.active_svlans); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + } + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + /* PF enabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. + */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, true); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + /* PF disabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. + */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, false); + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", |
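Taken together, the four VLAN-stripping cases in iavf_virtchnl_completion() keep netdev->features consistent with whatever the PF actually applied, covering both the error and the success paths:

        /*  request          PF verdict   handler action
         *  -------          ----------   --------------
         *  enable  strip    error        iavf_netdev_features_vlan_strip_set(netdev, false)
         *  disable strip    error        iavf_netdev_features_vlan_strip_set(netdev, true)
         *  enable  strip    success      iavf_netdev_features_vlan_strip_set(netdev, true)
         *  disable strip    success      iavf_netdev_features_vlan_strip_set(netdev, false)
         */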