Diffstat (limited to 'drivers/net/ethernet/intel')
30 files changed, 3406 insertions, 748 deletions
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 0fd268070fb4..a65d5a9ba7db 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2797,7 +2797,7 @@ static int e100_set_features(struct net_device *netdev, netdev->features = features; e100_exec_cb(nic, NULL, e100_configure); - return 0; + return 1; } static const struct net_device_ops e100_netdev_ops = { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8fe9af0e2ab7..a7c76732849f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -820,7 +820,7 @@ static int e1000_set_features(struct net_device *netdev, else e1000_reset(adapter); - return 0; + return 1; } static const struct net_device_ops e1000_netdev_ops = { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7acc61e4f645..745c1242a2d9 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7003,7 +7003,7 @@ static int e1000_set_features(struct net_device *netdev, else e1000e_reset(adapter); - return 0; + return 1; } static const struct net_device_ops e1000e_netdev_ops = { @@ -7350,7 +7350,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); - if (pci_dev_run_wake(pdev)) + if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp) pm_runtime_put_noidle(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 89440775aea1..b819689da7e2 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -42,10 +42,21 @@ extern const char ice_drv_ver[]; #define ICE_BAR0 0 -#define ICE_DFLT_NUM_DESC 128 #define ICE_REQ_DESC_MULTIPLE 32 #define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE #define ICE_MAX_NUM_DESC 8160 +/* set default number of Rx/Tx descriptors to the minimum between + * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page + */ +#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ + ALIGN(PAGE_SIZE / \ + sizeof(union ice_32byte_rx_desc), \ + ICE_REQ_DESC_MULTIPLE)) +#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ + ALIGN(PAGE_SIZE / \ + sizeof(struct ice_tx_desc), \ + ICE_REQ_DESC_MULTIPLE)) + #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_ETHTOOL_FWVER_LEN 32 @@ -114,6 +125,23 @@ extern const char ice_drv_ver[]; #define ice_for_each_q_vector(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++) +#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \ + ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX) + +#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \ + ICE_PROMISC_MCAST_TX | \ + ICE_PROMISC_UCAST_RX | \ + ICE_PROMISC_MCAST_RX | \ + ICE_PROMISC_VLAN_TX | \ + ICE_PROMISC_VLAN_RX) + +#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX) + +#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \ + ICE_PROMISC_MCAST_RX | \ + ICE_PROMISC_VLAN_TX | \ + ICE_PROMISC_VLAN_RX) + struct ice_tc_info { u16 qoffset; u16 qcount_tx; @@ -247,6 +275,7 @@ struct ice_vsi { u8 irqs_ready; u8 current_isup; /* Sync 'link up' logging */ u8 stat_offsets_loaded; + u8 vlan_ena; /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -257,26 +286,33 @@ 
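The e100/e1000/e1000e return-value changes above follow the ndo_set_features() contract: a negative return reports an error, 0 asks the core to commit the requested feature set itself, and a positive value tells the core the driver already updated netdev->features (all three drivers assign it right before returning). A condensed sketch of the caller side, simplified from the netdev core for illustration:

    /* how an ndo_set_features() result is interpreted (simplified) */
    static int apply_features(int err, unsigned long long *dev_features,
                              unsigned long long wanted)
    {
            if (err < 0)
                    return err;             /* error: nothing committed */
            if (err == 0)
                    *dev_features = wanted; /* core commits the new set */
            /* err > 0: the driver already wrote *dev_features itself */
            return 0;
    }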
struct ice_vsi { u16 num_txq; /* Used Tx queues */ u16 alloc_rxq; /* Allocated Rx queues */ u16 num_rxq; /* Used Rx queues */ - u16 num_desc; + u16 num_rx_desc; + u16 num_tx_desc; struct ice_tc_cfg tc_cfg; } ____cacheline_internodealigned_in_smp; /* struct that defines an interrupt vector */ struct ice_q_vector { struct ice_vsi *vsi; - cpumask_t affinity_mask; - struct napi_struct napi; - struct ice_ring_container rx; - struct ice_ring_container tx; - struct irq_affinity_notify affinity_notify; + u16 v_idx; /* index in the vsi->q_vector array. */ - u8 num_ring_tx; /* total number of Tx rings in vector */ u8 num_ring_rx; /* total number of Rx rings in vector */ - char name[ICE_INT_NAME_STR_LEN]; + u8 num_ring_tx; /* total number of Tx rings in vector */ + u8 itr_countdown; /* when 0 should adjust adaptive ITR */ /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this * value to the device */ u8 intrl; + + struct napi_struct napi; + + struct ice_ring_container rx; + struct ice_ring_container tx; + + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + + char name[ICE_INT_NAME_STR_LEN]; } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { @@ -355,8 +391,9 @@ struct ice_netdev_priv { * @vsi: pointer to vsi struct, can be NULL * @q_vector: pointer to q_vector, can be NULL */ -static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, - struct ice_q_vector *q_vector) +static inline void +ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, + struct ice_q_vector *q_vector) { u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx : ((struct ice_pf *)hw->back)->hw_oicr_idx; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 242c78469181..8ff438968199 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -953,8 +953,9 @@ struct ice_aqc_set_phy_cfg_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; -#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) -#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) +#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0) +#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) +#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) #define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) #define ICE_AQ_PHY_ENA_LINK BIT(3) #define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 63f003441300..5e7a31421c0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) * * Get Link Status (0x607). Returns the link status of the adapter. 
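Working the ICE_DFLT_NUM_RX_DESC/ICE_DFLT_NUM_TX_DESC defaults from ice.h above through by hand, as a standalone sketch: it assumes 4 KiB pages, a 32-byte union ice_32byte_rx_desc and a 16-byte struct ice_tx_desc, sizes that are not stated in this diff.

    #include <stdio.h>

    #define REQ_DESC_MULTIPLE 32
    #define MAX_NUM_DESC      8160
    /* round x up to a multiple of a (a must be a power of two) */
    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int page_size = 4096; /* assumed PAGE_SIZE */
            unsigned int rx_desc = 32;     /* assumed Rx descriptor size */
            unsigned int tx_desc = 16;     /* assumed Tx descriptor size */
            unsigned int rx = ALIGN_UP(page_size / rx_desc, REQ_DESC_MULTIPLE);
            unsigned int tx = ALIGN_UP(page_size / tx_desc, REQ_DESC_MULTIPLE);

            /* one page of descriptors per ring: 128 Rx, 256 Tx */
            printf("Rx default: %u\n", rx < MAX_NUM_DESC ? rx : MAX_NUM_DESC);
            printf("Tx default: %u\n", tx < MAX_NUM_DESC ? tx : MAX_NUM_DESC);
            return 0;
    }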
*/ -static enum ice_status +enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { @@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, /* flag cleared so calling functions don't call AQ again */ pi->phy.get_link_info = false; - return status; + return 0; } /** @@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id) */ case ICE_RXDID_FLEX_NIC: case ICE_RXDID_FLEX_NIC_2: - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG, - ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI, - ICE_RXFLG_FIN, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG, + ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI, + ICE_FLG_FIN, idx++); /* flex flag 1 is not used for flexi-flag programming, skipping * these four FLG64 bits. */ - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100, - ICE_RXFLG_EVLAN_x9100, idx++); - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100, - ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC, - ICE_RXFLG_TNL0, idx++); - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST, + ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI, + ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100, + ICE_FLG_EVLAN_x9100, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100, + ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC, + ICE_FLG_TNL0, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2, + ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx); break; default: @@ -1100,8 +1100,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { * * Dumps debug log about control command with descriptor contents. */ -void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, - void *buf, u16 buf_len) +void +ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf, + u16 buf_len) { struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; u16 len; @@ -1415,13 +1416,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) } /** - * ice_get_guar_num_vsi - determine number of guar VSI for a PF + * ice_get_num_per_func - determine number of resources per PF * @hw: pointer to the hw structure + * @max: value to be evenly split between each PF * * Determine the number of valid functions by going through the bitmap returned - * from parsing capabilities and use this to calculate the number of VSI per PF. + * from parsing capabilities and use this to calculate the number of resources + * per PF based on the max value passed in. 
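The even split performed by ice_get_num_per_func() can be sketched standalone as below; the 0xff valid-functions mask and the ICE_MAX_VSI value of 768 are assumptions taken from other versions of the driver, not from this hunk.

    #include <stdio.h>

    /* count the PFs present in the capability bitmap, then divide the
     * resource maximum evenly between them
     */
    static unsigned int num_per_func(unsigned int valid_functions,
                                     unsigned int max)
    {
            unsigned int funcs = __builtin_popcount(valid_functions & 0xff);

            return funcs ? max / funcs : 0;
    }

    int main(void)
    {
            /* e.g. four active PFs sharing 768 VSIs: 192 guaranteed each */
            printf("%u\n", num_per_func(0x0f, 768));
            return 0;
    }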
*/ -static u32 ice_get_guar_num_vsi(struct ice_hw *hw) +static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) { u8 funcs; @@ -1432,7 +1435,7 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw) if (!funcs) return 0; - return ICE_MAX_VSI / funcs; + return max / funcs; } /** @@ -1512,7 +1515,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, "HW caps: Dev.VSI cnt = %d\n", dev_p->num_vsi_allocd_to_host); } else if (func_p) { - func_p->guar_num_vsi = ice_get_guar_num_vsi(hw); + func_p->guar_num_vsi = + ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, "HW caps: Func.VSI cnt = %d\n", number); @@ -1617,8 +1621,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * @hw: pointer to the hardware structure * @opc: capabilities type to discover - pass in the command opcode */ -static enum ice_status ice_discover_caps(struct ice_hw *hw, - enum ice_adminq_opc opc) +static enum ice_status +ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) { enum ice_status status; u32 cap_count; @@ -1929,6 +1933,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, if (!cfg) return ICE_ERR_PARAM; + /* Ensure that only valid bits of cfg->caps can be turned on. */ + if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { + ice_debug(hw, ICE_DBG_PHY, + "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", + cfg->caps); + + cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; + } + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); desc.params.set_phy.lport_num = lport; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -2027,8 +2040,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) /* clear the old pause settings */ cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | ICE_AQC_PHY_EN_RX_LINK_PAUSE); + /* set the new capabilities */ cfg.caps |= pause_mask; + /* If the capabilities have changed, then set the new config */ if (cfg.caps != pcaps->caps) { int retry_count, retry_max = 10; @@ -2136,6 +2151,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, } /** + * ice_aq_set_event_mask + * @hw: pointer to the HW struct + * @port_num: port number of the physical function + * @mask: event mask to be set + * @cd: pointer to command details structure or NULL + * + * Set event mask (0x0613) + */ +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_event_mask *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_event_mask; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); + + cmd->lport_num = port_num; + + cmd->event_mask = cpu_to_le16(mask); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** * ice_aq_set_port_id_led * @pi: pointer to the port information * @is_orig_mode: is this LED set to original mode (by the net-list) @@ -2534,8 +2575,8 @@ do_aq: * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u8 src_byte, dest_byte, mask; u8 *from, *dest; @@ -2573,8 +2614,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_word(u8 *src_ctx, u8 
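Note that the new guard in ice_aq_set_phy_cfg() warns and clamps rather than failing the command. A standalone sketch of that warn-and-clamp pattern; 0xef leaves bit 4 clear, matching the gap between ICE_AQ_PHY_ENA_LINK at BIT(3) and ICE_AQ_PHY_ENA_AUTO_LINK_UPDT at BIT(5) in the define list above.

    #include <stdio.h>

    #define PHY_ENA_VALID_MASK 0xef /* bit 4 is reserved */

    /* strip unsupported caps bits instead of rejecting the command */
    static unsigned char clamp_phy_caps(unsigned char caps)
    {
            if (caps & ~PHY_ENA_VALID_MASK)
                    fprintf(stderr, "invalid caps bits: 0x%02x\n", caps);
            return caps & PHY_ENA_VALID_MASK;
    }

    int main(void)
    {
            printf("0x%02x\n", clamp_phy_caps(0xff)); /* prints 0xef */
            return 0;
    }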
*dest_ctx, const struct ice_ctx_ele *ce_info) { u16 src_word, mask; __le16 dest_word; @@ -2616,8 +2657,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u32 src_dword, mask; __le32 dest_dword; @@ -2667,8 +2708,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u64 src_qword, mask; __le64 dest_qword; @@ -2908,7 +2949,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, mutex_lock(&pi->sched_lock); - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + ice_for_each_traffic_class(i) { /* configuration is possible only if TC node is present */ if (!ice_sched_get_tc_node(pi, i)) continue; @@ -3012,8 +3053,9 @@ void ice_replay_post(struct ice_hw *hw) * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ -void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +void +ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) { u64 new_data; @@ -3043,8 +3085,9 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ -void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, - u64 *prev_stat, u64 *cur_stat) +void +ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) { u32 new_data; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d7c7c2ed8823..fbdfdee353bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -9,8 +9,8 @@ #include "ice_switch.h" #include <linux/avf/virtchnl.h> -void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, - u16 buf_len); +void +ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len); enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); @@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, - u16 *data); +enum ice_status +ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data); enum ice_status ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, @@ -89,6 +89,12 @@ enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); enum ice_status +ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, + struct ice_link_status *link, struct ice_sq_cd *cd); +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd 
*cd); +enum ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); @@ -106,8 +112,10 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); -void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); -void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, - u64 *prev_stat, u64 *cur_stat); +void +ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +void +ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index eb8d149e317c..4a1920e8f168 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1156,8 +1156,9 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks, * * Reports speed/duplex settings based on media_type */ -static int ice_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *ks) +static int +ice_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_link_status *hw_link_info; @@ -1400,13 +1401,12 @@ ice_set_link_ksettings(struct net_device *netdev, return -EOPNOTSUPP; /* Check if this is lan vsi */ - for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) { + ice_for_each_vsi(pf, idx) if (pf->vsi[idx]->type == ICE_VSI_PF) { if (np->vsi != pf->vsi[idx]) return -EOPNOTSUPP; break; } - } if (p->phy.media_type != ICE_MEDIA_BASET && p->phy.media_type != ICE_MEDIA_FIBER && @@ -1566,8 +1566,9 @@ done: * * Returns Success if the command is supported. */ -static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, - u32 __always_unused *rule_locs) +static int +ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 __always_unused *rule_locs) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2024,8 +2025,9 @@ out: * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
*/ -static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int +ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2180,8 +2182,9 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) return __ice_get_coalesce(netdev, ec, -1); } -static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num, - struct ethtool_coalesce *ec) +static int +ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) { return __ice_get_coalesce(netdev, ec, q_num); } @@ -2325,8 +2328,9 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) return __ice_set_coalesce(netdev, ec, -1); } -static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, - struct ethtool_coalesce *ec) +static int +ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) { return __ice_set_coalesce(netdev, ec, q_num); } diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6bf5cc064270..af6f32358363 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -106,6 +106,16 @@ #define VPGEN_VFRTRIG_VFSWR_M BIT(0) #define PFHMC_ERRORDATA 0x00520500 #define PFHMC_ERRORINFO 0x00520400 +#define GLINT_CTL 0x0016CC54 +#define GLINT_CTL_DIS_AUTOMASK_M BIT(0) +#define GLINT_CTL_ITR_GRAN_200_S 16 +#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16) +#define GLINT_CTL_ITR_GRAN_100_S 20 +#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20) +#define GLINT_CTL_ITR_GRAN_50_S 24 +#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24) +#define GLINT_CTL_ITR_GRAN_25_S 28 +#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28) #define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) #define GLINT_DYN_CTL_INTENA_M BIT(0) #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) @@ -168,6 +178,8 @@ #define VPINT_ALLOC_PCI_LAST_S 12 #define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12) #define VPINT_ALLOC_PCI_VALID_M BIT(31) +#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4)) +#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30) #define GLLAN_RCTL_0 0x002941F8 #define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) #define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index ef4c79b5aa32..a8c3fe87d7aa 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -208,23 +208,23 @@ enum ice_flex_rx_mdid { ICE_RX_MDID_HASH_HIGH, }; -/* Rx Flag64 packet flag bits */ -enum ice_rx_flg64_bits { - ICE_RXFLG_PKT_DSI = 0, - ICE_RXFLG_EVLAN_x8100 = 15, - ICE_RXFLG_EVLAN_x9100, - ICE_RXFLG_VLAN_x8100, - ICE_RXFLG_TNL_MAC = 22, - ICE_RXFLG_TNL_VLAN, - ICE_RXFLG_PKT_FRG, - ICE_RXFLG_FIN = 32, - ICE_RXFLG_SYN, - ICE_RXFLG_RST, - ICE_RXFLG_TNL0 = 38, - ICE_RXFLG_TNL1, - ICE_RXFLG_TNL2, - ICE_RXFLG_UDP_GRE, - ICE_RXFLG_RSVD = 63 +/* RX/TX Flag64 packet flag bits */ +enum ice_flg64_bits { + ICE_FLG_PKT_DSI = 0, + ICE_FLG_EVLAN_x8100 = 15, + ICE_FLG_EVLAN_x9100, + ICE_FLG_VLAN_x8100, + ICE_FLG_TNL_MAC = 22, + ICE_FLG_TNL_VLAN, + ICE_FLG_PKT_FRG, + ICE_FLG_FIN = 32, + ICE_FLG_SYN, + ICE_FLG_RST, + ICE_FLG_TNL0 = 38, + ICE_FLG_TNL1, + ICE_FLG_TNL2, + ICE_FLG_UDP_GRE, + ICE_FLG_RSVD = 63 }; /* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 
member */ @@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits { ICE_TX_DESC_CMD_EOP = 0x0001, ICE_TX_DESC_CMD_RS = 0x0002, ICE_TX_DESC_CMD_IL2TAG1 = 0x0008, - ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ - ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ - ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ - ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ - ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ - ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, + ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, + ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, + ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, + ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, + ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, }; #define ICE_TXD_QW1_OFFSET_S 16 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index fa61203bee26..45e361f72057 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -175,17 +175,14 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) int i; for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { - u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); - - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - break; + if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) & + QRX_CTRL_QENA_STAT_M)) + return 0; usleep_range(20, 40); } - if (i >= ICE_Q_WAIT_MAX_RETRY) - return -ETIMEDOUT; - return 0; + return -ETIMEDOUT; } /** @@ -279,25 +276,50 @@ err_txrings: } /** - * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI + * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI * @vsi: the VSI being configured + */ +static void ice_vsi_set_num_desc(struct ice_vsi *vsi) +{ + switch (vsi->type) { + case ICE_VSI_PF: + vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; + vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; + break; + default: + dev_dbg(&vsi->back->pdev->dev, + "Not setting number of Tx/Rx descriptors for VSI type %d\n", + vsi->type); + break; + } +} + +/** + * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI + * @vsi: the VSI being configured + * @vf_id: Id of the VF being configured * * Return 0 on success and a negative value on error */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi) +static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) { struct ice_pf *pf = vsi->back; + struct ice_vf *vf = NULL; + + if (vsi->type == ICE_VSI_VF) + vsi->vf_id = vf_id; + switch (vsi->type) { case ICE_VSI_PF: vsi->alloc_txq = pf->num_lan_tx; vsi->alloc_rxq = pf->num_lan_rx; - vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); break; case ICE_VSI_VF: - vsi->alloc_txq = pf->num_vf_qps; - vsi->alloc_rxq = pf->num_vf_qps; + vf = &pf->vf[vsi->vf_id]; + vsi->alloc_txq = vf->num_vf_qs; + vsi->alloc_rxq = vf->num_vf_qs; /* pf->num_vf_msix includes (VF miscellaneous vector + * data queue interrupts). Since vsi->num_q_vectors is number * of queues vectors, subtract 1 from the original vector @@ -310,6 +332,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->type); break; } + + ice_vsi_set_num_desc(vsi); } /** @@ -455,10 +479,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) * ice_vsi_alloc - Allocates the next available struct VSI in the PF * @pf: board private structure * @type: type of VSI + * @vf_id: Id of the VF being configured * * returns a pointer to a VSI on success, NULL on failure. 
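ice_vsi_setup_q_map() above leans on order_base_2() to round the per-TC queue count up to a power of two for the qmap encoding. A minimal reimplementation for illustration; the driver itself uses the kernel's ilog2-based helper.

    #include <stdio.h>

    /* smallest p such that 2^p >= n, e.g. 5 -> 3; matches
     * order_base_2() for n >= 1, the range the queue map cares about
     */
    static unsigned int order_base_2_sketch(unsigned int n)
    {
            unsigned int p = 0;

            while ((1u << p) < n)
                    p++;
            return p;
    }

    int main(void)
    {
            /* 5 Rx queues round up to 2^3 = 8 slots for the TC */
            printf("%u\n", order_base_2_sketch(5));
            return 0;
    }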
*/ -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) +static struct ice_vsi * +ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) { struct ice_vsi *vsi = NULL; @@ -484,7 +510,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) vsi->idx = pf->next_vsi; vsi->work_lmt = ICE_DFLT_IRQ_WORK; - ice_vsi_set_num_qs(vsi); + if (type == ICE_VSI_VF) + ice_vsi_set_num_qs(vsi, vf_id); + else + ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); switch (vsi->type) { case ICE_VSI_PF: @@ -579,11 +608,10 @@ err_scatter: /** * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI - * @qs_cfg: gathered variables needed for PF->VSI queues assignment + * @qs_cfg: gathered variables needed for pf->vsi queues assignment * - * This is an internal function for assigning queues from the PF to VSI and - * initially tries to find contiguous space. If it is not successful to find - * contiguous space, then it tries with the scatter approach. + * This function first tries to find contiguous space. If it is not successful, + * it tries with the scatter approach. * * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap */ @@ -827,7 +855,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) /* find the (rounded up) power-of-2 of qcount */ pow = order_base_2(qcount_rx); - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + ice_for_each_traffic_class(i) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { /* TC is not enabled */ vsi->tc_cfg.tc_info[i].qoffset = 0; @@ -852,7 +880,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) tx_count += tx_numq_tc; ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } - vsi->num_rxq = offset; + + /* if offset is non-zero, means it is calculated correctly based on + * enabled TCs for a given VSI otherwise qcount_rx will always + * be correct and non-zero because it is based off - VSI's + * allocated Rx queues which is at least 1 (hence qcount_tx will be + * at least 1) + */ + if (offset) + vsi->num_rxq = offset; + else + vsi->num_rxq = qcount_rx; + vsi->num_txq = tx_count; if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { @@ -923,6 +962,7 @@ static int ice_vsi_init(struct ice_vsi *vsi) if (!ctxt) return -ENOMEM; + ctxt->info = vsi->info; switch (vsi->type) { case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; @@ -948,6 +988,14 @@ static int ice_vsi_init(struct ice_vsi *vsi) ctxt->info.sw_id = vsi->port_info->sw_id; ice_vsi_setup_q_map(vsi, ctxt); + /* Enable MAC Antispoof with new VSI being initialized or updated */ + if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) { + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + ctxt->info.sec_flags |= + ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; + } + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, @@ -1215,7 +1263,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->ring_active = false; ring->vsi = vsi; ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; + ring->count = vsi->num_tx_desc; vsi->tx_rings[i] = ring; } @@ -1234,7 +1282,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; + ring->count = vsi->num_rx_desc; vsi->rx_rings[i] = ring; } @@ -1640,7 +1688,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) num_q_grps = 1; /* set up and configure the Tx queues for 
each enabled TC */ - for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) { + ice_for_each_traffic_class(tc) { if (!(vsi->tc_cfg.ena_tc & BIT(tc))) break; @@ -1717,6 +1765,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) } /** + * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set + * @hw: board specific structure + */ +static void ice_cfg_itr_gran(struct ice_hw *hw) +{ + u32 regval = rd32(hw, GLINT_CTL); + + /* no need to update global register if ITR gran is already set */ + if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && + (((regval & GLINT_CTL_ITR_GRAN_200_M) >> + GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_100_M) >> + GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_50_M) >> + GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_25_M) >> + GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) + return; + + regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & + GLINT_CTL_ITR_GRAN_200_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & + GLINT_CTL_ITR_GRAN_100_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & + GLINT_CTL_ITR_GRAN_50_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & + GLINT_CTL_ITR_GRAN_25_M); + wr32(hw, GLINT_CTL, regval); +} + +/** * ice_cfg_itr - configure the initial interrupt throttle values * @hw: pointer to the HW structure * @q_vector: interrupt vector that's being configured @@ -1728,6 +1807,8 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) static void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) { + ice_cfg_itr_gran(hw); + if (q_vector->num_ring_rx) { struct ice_ring_container *rc = &q_vector->rx; @@ -1738,7 +1819,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) rc->target_itr = ITR_TO_REG(rc->itr_setting); rc->next_update = jiffies + 1; rc->current_itr = rc->target_itr; - rc->latency_range = ICE_LOW_LATENCY; wr32(hw, GLINT_ITR(rc->itr_idx, vector), ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } @@ -1753,7 +1833,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) rc->target_itr = ITR_TO_REG(rc->itr_setting); rc->next_update = jiffies + 1; rc->current_itr = rc->target_itr; - rc->latency_range = ICE_LOW_LATENCY; wr32(hw, GLINT_ITR(rc->itr_idx, vector), ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } @@ -2025,8 +2104,9 @@ err_alloc_q_ids: * @rst_src: reset source * @rel_vmvf_num: Relative id of VF/VM */ -int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, - enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) +int +ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num) { return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, 0); @@ -2036,10 +2116,11 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * @vsi: VSI to enable or disable VLAN pruning on * @ena: set to true to enable VLAN pruning and false to disable it + * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode * * returns 0 if VSI is updated, negative otherwise */ -int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) { struct ice_vsi_ctx *ctxt; struct device *dev; @@ -2067,8 +2148,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; } - ctxt->info.valid_sections = 
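Once ice_cfg_itr_gran() above has pinned all four GLINT_CTL granularity fields to ICE_ITR_GRAN_US, an ITR target in microseconds maps to register ticks by a plain divide; that is what the >> ICE_ITR_GRAN_S in ice_cfg_itr() amounts to. A sketch assuming ICE_ITR_GRAN_US is 2:

    #include <stdio.h>

    #define ITR_GRAN_US 2 /* assumed ICE_ITR_GRAN_US */

    /* 2 us per hardware tick: a 50 us target becomes 25 ticks */
    static unsigned int itr_usecs_to_ticks(unsigned int usecs)
    {
            return usecs / ITR_GRAN_US; /* same as >> 1 for gran == 2 */
    }

    int main(void)
    {
            printf("%u\n", itr_usecs_to_ticks(50));
            return 0;
    }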
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | - ICE_AQ_VSI_PROP_SW_VALID); + if (!vlan_promisc) + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); if (status) { @@ -2112,7 +2195,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi *vsi; int ret, i; - vsi = ice_vsi_alloc(pf, type); + if (type == ICE_VSI_VF) + vsi = ice_vsi_alloc(pf, type, vf_id); + else + vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID); + if (!vsi) { dev_err(dev, "could not allocate VSI\n"); return NULL; } @@ -2596,6 +2683,7 @@ int ice_vsi_release(struct ice_vsi *vsi) int ice_vsi_rebuild(struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct ice_vf *vf = NULL; struct ice_pf *pf; int ret, i; @@ -2603,16 +2691,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) return -EINVAL; pf = vsi->back; + if (vsi->type == ICE_VSI_VF) + vf = &pf->vf[vsi->vf_id]; + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_free_q_vectors(vsi); - ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); - ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); - vsi->sw_base_vector = 0; + + if (vsi->type != ICE_VSI_VF) { + /* reclaim SW interrupts back to the common pool */ + ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); + pf->num_avail_sw_msix += vsi->num_q_vectors; + vsi->sw_base_vector = 0; + /* reclaim HW interrupts back to the common pool */ + ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, + vsi->idx); + pf->num_avail_hw_msix += vsi->num_q_vectors; + } else { + /* Reclaim VF resources back to the common pool for reset + * and rebuild, with vector reassignment + */ + ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx, + vsi->idx); + pf->num_avail_hw_msix += pf->num_vf_msix; + } vsi->hw_base_vector = 0; + ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi, false); ice_dev_onetime_setup(&vsi->back->hw); + if (vsi->type == ICE_VSI_VF) + ice_vsi_set_num_qs(vsi, vf->vf_id); + else + ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); ice_vsi_set_tc_cfg(vsi); /* Initialize VSI struct elements and create VSI in FW */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 7988a53729a9..519ef59e9e43 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -35,7 +35,7 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num); -int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); void ice_vsi_delete(struct ice_vsi *vsi); @@ -70,8 +70,6 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi); void ice_vsi_free_tx_rings(struct ice_vsi *vsi); -int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); - int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 47cc3f905b7f..f7073e046979 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -168,6 +168,39 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) } /** + * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF + * @vsi: the VSI being configured + * @promisc_m: mask of promiscuous config bits + * @set_promisc: enable or disable promisc flag request + * + */
+static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) +{ + struct ice_hw *hw = &vsi->back->hw; + enum ice_status status = 0; + + if (vsi->type != ICE_VSI_PF) + return 0; + + if (vsi->vlan_ena) { + status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, + set_promisc); + } else { + if (set_promisc) + status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + else + status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + } + + if (status) + return -EIO; + + return 0; +} + +/** * ice_vsi_sync_fltr - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @@ -182,6 +215,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) struct ice_hw *hw = &pf->hw; enum ice_status status = 0; u32 changed_flags = 0; + u8 promisc_m; int err = 0; if (!vsi->netdev) @@ -226,7 +260,11 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) /* Add mac addresses in the sync list */ status = ice_add_mac(hw, &vsi->tmp_sync_list); ice_free_fltr_list(dev, &vsi->tmp_sync_list); - if (status) { + /* If filter is added successfully or already exists, do not go into + * 'if' condition and report it as error. Instead continue processing + * rest of the function. + */ + if (status && status != ICE_ERR_ALREADY_EXISTS) { netdev_err(netdev, "Failed to add MAC filters\n"); /* If there is no more space for new umac filters, vsi * should go into promiscuous mode. There should be some @@ -245,8 +283,35 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } } /* check for changes in promiscuous modes */ - if (changed_flags & IFF_ALLMULTI) - netdev_warn(netdev, "Unsupported configuration\n"); + if (changed_flags & IFF_ALLMULTI) { + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + if (vsi->vlan_ena) + promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_MCAST_PROMISC_BITS; + + err = ice_cfg_promisc(vsi, promisc_m, true); + if (err) { + netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", + vsi->vsi_num); + vsi->current_netdev_flags &= ~IFF_ALLMULTI; + goto out_promisc; + } + } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { + if (vsi->vlan_ena) + promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_MCAST_PROMISC_BITS; + + err = ice_cfg_promisc(vsi, promisc_m, false); + if (err) { + netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", + vsi->vsi_num); + vsi->current_netdev_flags |= IFF_ALLMULTI; + goto out_promisc; + } + } + } if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { @@ -322,7 +387,7 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf) clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); - for (v = 0; v < pf->num_alloc_vsi; v++) + ice_for_each_vsi(pf, v) if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && ice_vsi_sync_fltr(pf->vsi[v])) { /* come back and try again later */ @@ -342,6 +407,10 @@ ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + /* already prepared for reset */ + if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + return; + /* Notify VFs of impending reset */ if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); @@ -394,6 +463,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) ice_rebuild(pf); clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); clear_bit(__ICE_PFR_REQ, pf->state); + ice_reset_all_vfs(pf, true); } } @@ -416,10 +486,15 @@ static void ice_reset_subtask(struct ice_pf *pf) * for the reset now), poll for reset done, rebuild and return. 
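The IFF_ALLMULTI handling added to ice_vsi_sync_fltr() above boils down to one mask choice plus a flag rollback on failure. A condensed standalone sketch; the bit values are illustrative placeholders for the ICE_MCAST_*_PROMISC_BITS masks defined in ice.h earlier in this diff.

    #include <stdbool.h>
    #include <stdio.h>

    #define ALLMULTI           0x1  /* stand-in for IFF_ALLMULTI */
    #define MCAST_PROMISC      0x0a /* illustrative mask values */
    #define MCAST_VLAN_PROMISC 0x7a

    /* pick the VLAN-aware mask when a VLAN is active, then roll the
     * flag back if the adminq call failed so a later sync retries
     */
    static unsigned int sync_allmulti(unsigned int flags, bool vlan_ena,
                                      bool aq_ok)
    {
            unsigned char mask = vlan_ena ? MCAST_VLAN_PROMISC
                                          : MCAST_PROMISC;

            (void)mask; /* would be handed to ice_cfg_promisc() */
            if (!aq_ok)
                    flags ^= ALLMULTI; /* undo the requested transition */
            return flags;
    }

    int main(void)
    {
            printf("0x%x\n", sync_allmulti(ALLMULTI, true, false)); /* 0x0 */
            return 0;
    }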
*/ if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { - clear_bit(__ICE_GLOBR_RECV, pf->state); - clear_bit(__ICE_CORER_RECV, pf->state); - if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) - ice_prepare_for_reset(pf); + /* Perform the largest reset requested */ + if (test_and_clear_bit(__ICE_CORER_RECV, pf->state)) + reset_type = ICE_RESET_CORER; + if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) + reset_type = ICE_RESET_GLOBR; + /* return if no valid reset type requested */ + if (reset_type == ICE_RESET_INVAL) + return; + ice_prepare_for_reset(pf); /* make sure we are ready to rebuild */ if (ice_check_reset(&pf->hw)) { @@ -436,6 +511,7 @@ static void ice_reset_subtask(struct ice_pf *pf) clear_bit(__ICE_PFR_REQ, pf->state); clear_bit(__ICE_CORER_REQ, pf->state); clear_bit(__ICE_GLOBR_REQ, pf->state); + ice_reset_all_vfs(pf, true); } return; @@ -519,6 +595,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) case ICE_FC_RX_PAUSE: fc = "RX"; break; + case ICE_FC_NONE: + fc = "None"; + break; default: fc = "Unknown"; break; @@ -635,19 +714,70 @@ static void ice_watchdog_subtask(struct ice_pf *pf) pf->serv_tmr_prev = jiffies; - if (ice_link_event(pf, pf->hw.port_info)) - dev_dbg(&pf->pdev->dev, "ice_link_event failed\n"); - /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ ice_update_pf_stats(pf); - for (i = 0; i < pf->num_alloc_vsi; i++) + ice_for_each_vsi(pf, i) if (pf->vsi[i] && pf->vsi[i]->netdev) ice_update_vsi_stats(pf->vsi[i]); } /** + * ice_init_link_events - enable/initialize link events + * @pi: pointer to the port_info instance + * + * Returns -EIO on failure, 0 on success + */ +static int ice_init_link_events(struct ice_port_info *pi) +{ + u16 mask; + + mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | + ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); + + if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { + dev_dbg(ice_hw_to_dev(pi->hw), + "Failed to set link event mask for port %d\n", + pi->lport); + return -EIO; + } + + if (ice_aq_get_link_info(pi, true, NULL, NULL)) { + dev_dbg(ice_hw_to_dev(pi->hw), + "Failed to enable link events for port %d\n", + pi->lport); + return -EIO; + } + + return 0; +} + +/** + * ice_handle_link_event - handle link event via ARQ + * @pf: pf that the link event is associated with + * + * Return -EINVAL if port_info is null + * Return status on success + */ +static int ice_handle_link_event(struct ice_pf *pf) +{ + struct ice_port_info *port_info; + int status; + + port_info = pf->hw.port_info; + if (!port_info) + return -EINVAL; + + status = ice_link_event(pf, port_info); + if (status) + dev_dbg(&pf->pdev->dev, + "Could not process link event, error %d\n", status); + + return status; +} + +/** * __ice_clean_ctrlq - helper function to clean controlq rings * @pf: ptr to struct ice_pf * @q_type: specific Control queue type @@ -750,6 +880,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) opcode = le16_to_cpu(event.desc.opcode); switch (opcode) { + case ice_aqc_opc_get_link_status: + if (ice_handle_link_event(pf)) + dev_err(&pf->pdev->dev, + "Could not handle link event\n"); + break; case ice_mbx_opc_send_msg_to_pf: ice_vc_process_vf_msg(pf, &event); break; @@ -877,6 +1012,18 @@ static void ice_service_task_stop(struct ice_pf *pf) } /** + * ice_service_task_restart - restart service task and schedule works + * @pf: board private structure + * + * This function is needed for suspend and resume works (e.g WoL scenario) + */ +static void 
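One subtlety in ice_init_link_events() above: the value handed to Set Event Mask (0x0613) is the complement of the events the driver wants, which implies a set bit suppresses delivery, leaving only link up/down, media-not-available and module-qualification-failure events enabled. A standalone sketch with placeholder bit positions; the real ICE_AQ_LINK_EVENT_* values live in the adminq header.

    #include <stdint.h>
    #include <stdio.h>

    #define LINK_EVENT_UPDOWN           (1 << 0) /* placeholder */
    #define LINK_EVENT_MEDIA_NA         (1 << 1) /* placeholder */
    #define LINK_EVENT_MODULE_QUAL_FAIL (1 << 2) /* placeholder */

    int main(void)
    {
            /* complement: every bit set except the three wanted events */
            uint16_t mask = ~((uint16_t)(LINK_EVENT_UPDOWN |
                                         LINK_EVENT_MEDIA_NA |
                                         LINK_EVENT_MODULE_QUAL_FAIL));

            printf("event mask: 0x%04x\n", mask); /* 0xfff8 */
            return 0;
    }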
ice_service_task_restart(struct ice_pf *pf) +{ + clear_bit(__ICE_SERVICE_DIS, pf->state); + ice_service_task_schedule(pf); +} + +/** * ice_service_timer - timer callback to schedule service task * @t: pointer to timer_list */ @@ -1111,8 +1258,9 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) * This is a callback function used by the irq_set_affinity_notifier function * so that we may register to receive changes to the irq affinity masks. */ -static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) +static void +ice_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) { struct ice_q_vector *q_vector = container_of(notify, struct ice_q_vector, affinity_notify); @@ -1184,10 +1332,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) /* skip this unused q_vector */ continue; } - err = devm_request_irq(&pf->pdev->dev, - pf->msix_entries[base + vector].vector, - vsi->irq_handler, 0, q_vector->name, - q_vector); + err = devm_request_irq(&pf->pdev->dev, irq_num, + vsi->irq_handler, 0, + q_vector->name, q_vector); if (err) { netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", err); @@ -1656,11 +1803,13 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) * * net_device_ops implementation for adding vlan ids */ -static int ice_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int +ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, + u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + int ret; if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@ -1673,8 +1822,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, /* Enable VLAN pruning when VLAN 0 is added */ if (unlikely(!vid)) { - int ret = ice_cfg_vlan_pruning(vsi, true); - + ret = ice_cfg_vlan_pruning(vsi, true, false); if (ret) return ret; } @@ -1683,7 +1831,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch */ - return ice_vsi_add_vlan(vsi, vid); + ret = ice_vsi_add_vlan(vsi, vid); + if (!ret) { + vsi->vlan_ena = true; + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + } + + return ret; } /** @@ -1694,12 +1848,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, * * net_device_ops implementation for removing vlan ids */ -static int ice_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int +ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, + u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int status; + int ret; if (vsi->info.pvid) return -EINVAL; @@ -1707,15 +1862,17 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev, /* Make sure ice_vsi_kill_vlan is successful before updating VLAN * information */ - status = ice_vsi_kill_vlan(vsi, vid); - if (status) - return status; + ret = ice_vsi_kill_vlan(vsi, vid); + if (ret) + return ret; /* Disable VLAN pruning when VLAN 0 is removed */ if (unlikely(!vid)) - status = ice_cfg_vlan_pruning(vsi, false); + ret = ice_cfg_vlan_pruning(vsi, false, false); - return status; + vsi->vlan_ena = false; + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + return ret; } /** @@ -2033,23 +2190,6 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /** - * 
ice_verify_itr_gran - verify driver's assumption of ITR granularity - * @pf: pointer to the PF structure - * - * There is no error returned here because the driver will be able to handle a - * different ITR granularity, but interrupt moderation will not be accurate if - * the driver's assumptions are not verified. This assumption is made so we can - * use constants in the hot path instead of accessing structure members. - */ -static void ice_verify_itr_gran(struct ice_pf *pf) -{ - if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1)) - dev_warn(&pf->pdev->dev, - "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n", - (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran); -} - -/** * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * @pf: pointer to the PF structure * @@ -2072,9 +2212,10 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) * * Returns 0 on success, negative on failure */ -static int ice_probe(struct pci_dev *pdev, - const struct pci_device_id __always_unused *ent) +static int +ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { + struct device *dev = &pdev->dev; struct ice_pf *pf; struct ice_hw *hw; int err; @@ -2086,20 +2227,20 @@ static int ice_probe(struct pci_dev *pdev, err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); if (err) { - dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); + dev_err(dev, "BAR0 I/O map error %d\n", err); return err; } - pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); + pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL); if (!pf) return -ENOMEM; /* set up for high or low dma */ - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (err) - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + dev_err(dev, "DMA configuration failed: 0x%x\n", err); return err; } @@ -2133,12 +2274,12 @@ static int ice_probe(struct pci_dev *pdev, err = ice_init_hw(hw); if (err) { - dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); + dev_err(dev, "ice_init_hw failed: %d\n", err); err = -EIO; goto err_exit_unroll; } - dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", + dev_info(dev, "firmware %d.%d.%05d api %d.%d\n", hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, hw->api_maj_ver, hw->api_min_ver); @@ -2152,8 +2293,8 @@ static int ice_probe(struct pci_dev *pdev, goto err_init_pf_unroll; } - pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, - sizeof(*pf->vsi), GFP_KERNEL); + pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), + GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_init_pf_unroll; @@ -2161,8 +2302,7 @@ static int ice_probe(struct pci_dev *pdev, err = ice_init_interrupt_scheme(pf); if (err) { - dev_err(&pdev->dev, - "ice_init_interrupt_scheme failed: %d\n", err); + dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; goto err_init_interrupt_unroll; } @@ -2178,15 +2318,13 @@ static int ice_probe(struct pci_dev *pdev, if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { err = ice_req_irq_msix_misc(pf); if (err) { - dev_err(&pdev->dev, - "setup of misc vector failed: %d\n", err); + dev_err(dev, "setup of misc vector failed: %d\n", err); goto err_init_interrupt_unroll; } } /* create switch struct for the switch element created by FW on boot */ - pf->first_sw = devm_kzalloc(&pdev->dev, 
sizeof(*pf->first_sw), - GFP_KERNEL); + pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); if (!pf->first_sw) { err = -ENOMEM; goto err_msix_misc_unroll; @@ -2204,8 +2342,7 @@ static int ice_probe(struct pci_dev *pdev, err = ice_setup_pf_sw(pf); if (err) { - dev_err(&pdev->dev, - "probe failed due to setup pf switch:%d\n", err); + dev_err(dev, "probe failed due to setup pf switch:%d\n", err); goto err_alloc_sw_unroll; } @@ -2214,8 +2351,13 @@ static int ice_probe(struct pci_dev *pdev, /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); + err = ice_init_link_events(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_init_link_events failed: %d\n", err); + goto err_alloc_sw_unroll; + } + ice_verify_cacheline_size(pf); - ice_verify_itr_gran(pf); return 0; @@ -2227,7 +2369,7 @@ err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: ice_clear_interrupt_scheme(pf); - devm_kfree(&pdev->dev, pf->vsi); + devm_kfree(dev, pf->vsi); err_init_pf_unroll: ice_deinit_pf(pf); ice_deinit_hw(hw); @@ -2272,6 +2414,136 @@ static void ice_remove(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); } +/** + * ice_pci_err_detected - warning that PCI error has been detected + * @pdev: PCI device information struct + * @err: the type of PCI error + * + * Called to warn that something happened on the PCI bus and the error handling + * is in progress. Allows the driver to gracefully prepare/handle PCI errors. + */ +static pci_ers_result_t +ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (!pf) { + dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", + __func__, err); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (!test_bit(__ICE_SUSPENDED, pf->state)) { + ice_service_task_stop(pf); + + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(__ICE_PFR_REQ, pf->state); + ice_prepare_for_reset(pf); + } + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ice_pci_err_slot_reset - a PCI slot reset has just happened + * @pdev: PCI device information struct + * + * Called to determine if the driver can recover from the PCI slot reset by + * using a register read to determine if the device is recoverable. 
+ */ +static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + pci_ers_result_t result; + int err; + u32 reg; + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset, error %d\n", + err); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_wake_from_d3(pdev, false); + + /* Check for life */ + reg = rd32(&pf->hw, GLGEN_RTRIG); + if (!reg) + result = PCI_ERS_RESULT_RECOVERED; + else + result = PCI_ERS_RESULT_DISCONNECT; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) + dev_dbg(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed, error %d\n", + err); + /* non-fatal, continue */ + + return result; +} + +/** + * ice_pci_err_resume - restart operations after PCI error recovery + * @pdev: PCI device information struct + * + * Called to allow the driver to bring things back up after PCI error and/or + * reset recovery have finished + */ +static void ice_pci_err_resume(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (!pf) { + dev_err(&pdev->dev, + "%s failed, device is unrecoverable\n", __func__); + return; + } + + if (test_bit(__ICE_SUSPENDED, pf->state)) { + dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", + __func__); + return; + } + + ice_do_reset(pf, ICE_RESET_PFR); + ice_service_task_restart(pf); + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); +} + +/** + * ice_pci_err_reset_prepare - prepare device driver for PCI reset + * @pdev: PCI device information struct + */ +static void ice_pci_err_reset_prepare(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (!test_bit(__ICE_SUSPENDED, pf->state)) { + ice_service_task_stop(pf); + + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(__ICE_PFR_REQ, pf->state); + ice_prepare_for_reset(pf); + } + } +} + +/** + * ice_pci_err_reset_done - PCI reset done, device driver reset can begin + * @pdev: PCI device information struct + */ +static void ice_pci_err_reset_done(struct pci_dev *pdev) +{ + ice_pci_err_resume(pdev); +} + /* ice_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last @@ -2289,12 +2561,21 @@ static const struct pci_device_id ice_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); +static const struct pci_error_handlers ice_pci_err_handler = { + .error_detected = ice_pci_err_detected, + .slot_reset = ice_pci_err_slot_reset, + .reset_prepare = ice_pci_err_reset_prepare, + .reset_done = ice_pci_err_reset_done, + .resume = ice_pci_err_resume +}; + static struct pci_driver ice_driver = { .name = KBUILD_MODNAME, .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, .sriov_configure = ice_sriov_configure, + .err_handler = &ice_pci_err_handler }; /** @@ -2512,9 +2793,10 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], * @addr: the MAC address entry being added * @vid: VLAN id */ -static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - __always_unused u16 vid) +static int +ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + __always_unused u16 vid) { int err; @@ -2538,8 +2820,8 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], * @netdev: ptr to the netdev being adjusted * 
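The decision made by ice_pci_err_slot_reset() above reduces to: re-enable the device, then use a register read as a liveness probe, with GLGEN_RTRIG reading zero meaning no reset is still pending. A standalone sketch; the enum names are illustrative, not the kernel's pci_ers_result_t values.

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ERS_RECOVERED, ERS_DISCONNECT } ers_result_t;

    static ers_result_t slot_reset_result(bool reenable_ok,
                                          unsigned int rtrig)
    {
            if (!reenable_ok) /* pci_enable_device_mem() failed */
                    return ERS_DISCONNECT;
            /* alive only if no global/core/PF reset is pending */
            return rtrig == 0 ? ERS_RECOVERED : ERS_DISCONNECT;
    }

    int main(void)
    {
            printf("%d\n", slot_reset_result(true, 0)); /* 0: recovered */
            return 0;
    }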
@features: the feature set that the stack is suggesting */ -static int ice_set_features(struct net_device *netdev, - netdev_features_t features) +static int +ice_set_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2666,7 +2948,7 @@ static int ice_up_complete(struct ice_vsi *vsi) ice_service_task_schedule(pf); - return err; + return 0; } /** @@ -2693,8 +2975,8 @@ int ice_up(struct ice_vsi *vsi) * This function fetches stats from the ring considering the atomic operations * that needs to be performed to read u64 values in 32 bit machine. */ -static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, - u64 *bytes) +static void +ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) { unsigned int start; *pkts = 0; @@ -3276,7 +3558,7 @@ static void ice_vsi_release_all(struct ice_pf *pf) if (!pf->vsi) return; - for (i = 0; i < pf->num_alloc_vsi; i++) { + ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) continue; @@ -3375,16 +3657,12 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf) int i; /* loop through pf->vsi array and reinit the VSI if found */ - for (i = 0; i < pf->num_alloc_vsi; i++) { + ice_for_each_vsi(pf, i) { int err; if (!pf->vsi[i]) continue; - /* VF VSI rebuild isn't supported yet */ - if (pf->vsi[i]->type == ICE_VSI_VF) - continue; - err = ice_vsi_rebuild(pf->vsi[i]); if (err) { dev_err(&pf->pdev->dev, @@ -3412,7 +3690,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf) int i; /* loop through pf->vsi array and replay the VSI if found */ - for (i = 0; i < pf->num_alloc_vsi; i++) { + ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) continue; @@ -3521,9 +3799,7 @@ static void ice_rebuild(struct ice_pf *pf) goto err_vsi_rebuild; } - ice_reset_all_vfs(pf, true); - - for (i = 0; i < pf->num_alloc_vsi; i++) { + ice_for_each_vsi(pf, i) { bool link_up; if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 56049739a250..e0218f4c8f0b 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -276,7 +276,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); if (status || num_groups_removed != 1) - ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); + ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", + hw->adminq.sq_last_status); devm_kfree(ice_hw_to_dev(hw), buf); return status; @@ -360,12 +361,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - enum ice_status status; - status = ice_sched_remove_elems(hw, node->parent, 1, &teid); - if (status) - ice_debug(hw, ICE_DBG_SCHED, - "remove element failed %d\n", status); + ice_sched_remove_elems(hw, node->parent, 1, &teid); } parent = node->parent; /* root has no parent */ @@ -697,7 +694,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, &num_groups_added, NULL); if (status || num_groups_added != 1) { - ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n"); + ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", + hw->adminq.sq_last_status); 
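ice_fetch_u64_stats_per_ring(), reflowed above, exists because 64-bit ring counters cannot be read atomically on 32-bit machines; it wraps the standard u64_stats retry loop. A kernel-style sketch of that pattern, with illustrative struct and field names; which begin/retry variant applies depends on the kernel version.

    #include <linux/u64_stats_sync.h>

    struct ring_stats {
            u64 pkts;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    /* re-read until the writer-side sequence count is stable */
    static void fetch_ring_stats(struct ring_stats *rs, u64 *pkts,
                                 u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&rs->syncp);
                    *pkts = rs->pkts;
                    *bytes = rs->bytes;
            } while (u64_stats_fetch_retry(&rs->syncp, start));
    }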
devm_kfree(ice_hw_to_dev(hw), buf); return ICE_ERR_CFG; } @@ -1271,42 +1269,6 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, } /** - * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree - * @pi: port information structure - * @vsi_node: pointer to the VSI node - * @num_nodes: pointer to the num nodes that needs to be removed per layer - * @owner: node owner (lan or rdma) - * - * This function removes the VSI child nodes from the tree. It gets called for - * lan and rdma separately. - */ -static void -ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi, - struct ice_sched_node *vsi_node, u16 *num_nodes, - u8 owner) -{ - struct ice_sched_node *node, *next; - u8 i, qgl, vsil; - u16 num; - - qgl = ice_sched_get_qgrp_layer(pi->hw); - vsil = ice_sched_get_vsi_layer(pi->hw); - - for (i = qgl; i > vsil; i--) { - num = num_nodes[i]; - node = ice_sched_get_first_node(pi->hw, vsi_node, i); - while (node && num) { - next = node->sibling; - if (node->owner == owner && !node->num_children) { - ice_free_sched_node(pi, node); - num--; - } - node = next; - } - } -} - -/** * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes * @hw: pointer to the hw struct * @tc_node: pointer to TC node @@ -1446,7 +1408,6 @@ static enum ice_status ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { - u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; @@ -1454,7 +1415,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, enum ice_status status = 0; struct ice_hw *hw = pi->hw; u16 prev_numqs; - u8 i; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) @@ -1473,36 +1433,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, else return ICE_ERR_PARAM; - /* num queues are not changed */ - if (prev_numqs == new_numqs) + /* num queues are not changed or less than the previous number */ + if (new_numqs <= prev_numqs) return status; - - /* calculate number of nodes based on prev/new number of qs */ - if (prev_numqs) - ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes); - if (new_numqs) ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); - - if (prev_numqs > new_numqs) { - for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) - new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i]; - - ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes, - owner); - } else { - for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) - new_num_nodes[i] -= prev_num_nodes[i]; - - status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, - new_num_nodes, owner); - if (status) - return status; - } - + /* Keep the max number of queue configuration all the time. Update the + * tree only if number of queues > previous number of queues. This may + * leave some extra nodes in the tree if number of queues < previous + * number but that wouldn't harm anything. Removing those extra nodes + * may complicate the code if those nodes are part of SRL or + * individually rate limited. 
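/*
 * Illustrative sketch (not part of the patch): the rework above makes the
 * scheduler's queue-node management grow-only -- shrinking the queue count
 * deliberately leaves already-built nodes in place. A scalar model of that
 * policy (the driver computes per-layer node counts; names are invented):
 */
static unsigned int sched_nodes_to_add(unsigned int prev_numqs,
				       unsigned int new_numqs,
				       unsigned int *max_lanq)
{
	if (new_numqs <= prev_numqs)
		return 0;		/* no change or shrink: keep the tree */

	*max_lanq = new_numqs;		/* record the new high-water mark */
	return new_numqs - prev_numqs;	/* only the delta needs new nodes */
}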
+ */ + status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, + new_num_nodes, owner); + if (status) + return status; vsi_ctx->sched.max_lanq[tc] = new_numqs; - return status; + return 0; } /** @@ -1527,6 +1476,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, enum ice_status status = 0; struct ice_hw *hw = pi->hw; + ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_PARAM; @@ -1646,8 +1596,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; - u8 i, j = 0; + u8 i; + ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return status; mutex_lock(&pi->sched_lock); @@ -1655,8 +1606,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (!vsi_ctx) goto exit_sched_rm_vsi_cfg; - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + ice_for_each_traffic_class(i) { struct ice_sched_node *vsi_node, *tc_node; + u8 j = 0; tc_node = ice_sched_get_tc_node(pi, i); if (!tc_node) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 09d1c314b68f..7dcd9ddf54f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -322,8 +322,8 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) * * save the VSI context entry for a given VSI handle */ -static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, - struct ice_vsi_ctx *vsi) +static void +ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) { hw->vsi_ctx[vsi_handle] = vsi; } @@ -398,7 +398,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; } - return status; + return 0; } /** @@ -643,21 +643,43 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) fi->fltr_act == ICE_FWD_TO_VSI_LIST || fi->fltr_act == ICE_FWD_TO_Q || fi->fltr_act == ICE_FWD_TO_QGRP)) { - fi->lb_en = true; - /* Do not set lan_en to TRUE if + /* Setting LB for prune actions will result in replicated + * packets to the internal switch that will be dropped. + */ + if (fi->lkup_type != ICE_SW_LKUP_VLAN) + fi->lb_en = true; + + /* Set lan_en to TRUE if * 1. The switch is a VEB AND * 2 - * 2.1 The lookup is MAC with unicast addr for MAC, OR - * 2.2 The lookup is MAC_VLAN with unicast addr for MAC + * 2.1 The lookup is a directional lookup like ethertype, + * promiscuous, ethertype-mac, promiscuous-vlan + * and default-port OR + * 2.2 The lookup is VLAN, OR + * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR + * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC. * - * In all other cases, the LAN enable has to be set to true. + * OR + * + * The switch is a VEPA. + * + * In all other cases, the LAN enable has to be set to false. 
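/*
 * Illustrative sketch (not part of the patch): the lan_en rule spelled out
 * in the comment above, folded into one predicate. The enum is a reduced
 * stand-in for the driver's lookup types and the unicast test is passed in
 * as a flag:
 */
#include <stdbool.h>

enum lkup { LKUP_MAC, LKUP_MAC_VLAN, LKUP_VLAN, LKUP_ETHERTYPE,
	    LKUP_ETHERTYPE_MAC, LKUP_PROMISC, LKUP_PROMISC_VLAN, LKUP_DFLT };

static bool lan_en(bool evb_veb, enum lkup type, bool mac_is_unicast)
{
	if (!evb_veb)
		return true;		/* VEPA: always forward to the wire */

	switch (type) {
	case LKUP_ETHERTYPE:
	case LKUP_ETHERTYPE_MAC:
	case LKUP_PROMISC:
	case LKUP_PROMISC_VLAN:
	case LKUP_DFLT:
	case LKUP_VLAN:
		return true;		/* directional or VLAN lookups */
	case LKUP_MAC:
	case LKUP_MAC_VLAN:
		return !mac_is_unicast;	/* only mcast/bcast leave the VEB */
	}

	return false;
}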
*/ - if (!(hw->evb_veb && - ((fi->lkup_type == ICE_SW_LKUP_MAC && - is_unicast_ether_addr(fi->l_data.mac.mac_addr)) || - (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && - is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr))))) + if (hw->evb_veb) { + if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE || + fi->lkup_type == ICE_SW_LKUP_PROMISC || + fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || + fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + fi->lkup_type == ICE_SW_LKUP_DFLT || + fi->lkup_type == ICE_SW_LKUP_VLAN || + (fi->lkup_type == ICE_SW_LKUP_MAC && + !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) || + (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && + !is_unicast_ether_addr(fi->l_data.mac.mac_addr))) + fi->lan_en = true; + } else { + fi->lan_en = true; + } } } @@ -2190,6 +2212,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, } /** + * ice_determine_promisc_mask + * @fi: filter info to parse + * + * Helper function to determine which ICE_PROMISC_ mask corresponds + * to the given filter info. + */ +static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) +{ + u16 vid = fi->l_data.mac_vlan.vlan_id; + u8 *macaddr = fi->l_data.mac.mac_addr; + bool is_tx_fltr = false; + u8 promisc_mask = 0; + + if (fi->flag == ICE_FLTR_TX) + is_tx_fltr = true; + + if (is_broadcast_ether_addr(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX; + else if (is_multicast_ether_addr(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX; + else if (is_unicast_ether_addr(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX; + if (vid) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX; + + return promisc_mask; +} + +/** + * ice_remove_promisc - Remove promisc based filter rules + * @hw: pointer to the hardware structure + * @recp_id: recipe id for which the rule needs to be removed + * @v_list: list of promisc entries + */ +static enum ice_status +ice_remove_promisc(struct ice_hw *hw, u8 recp_id, + struct list_head *v_list) +{ + struct ice_fltr_list_entry *v_list_itr, *tmp; + + list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { + v_list_itr->status = + ice_remove_rule_internal(hw, recp_id, v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; + } + return 0; +} + +/** + * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to clear mode + * @promisc_mask: mask of promiscuous config bits to clear + * @vid: VLAN ID to clear VLAN promiscuous + */ +enum ice_status +ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_list_entry *fm_entry, *tmp; + struct list_head remove_list_head; + struct ice_fltr_mgmt_list_entry *itr; + struct list_head *rule_head; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; + u8 recipe_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + if (vid) + recipe_id = ICE_SW_LKUP_PROMISC_VLAN; + else + recipe_id = ICE_SW_LKUP_PROMISC; + + rule_head = &sw->recp_list[recipe_id].filt_rules; + rule_lock = &sw->recp_list[recipe_id].filt_rule_lock; + + INIT_LIST_HEAD(&remove_list_head); + + mutex_lock(rule_lock); + list_for_each_entry(itr, rule_head, list_entry) { + u8 fltr_promisc_mask = 0; + + if (!ice_vsi_uses_fltr(itr, vsi_handle)) + continue; + + fltr_promisc_mask |= +
ice_determine_promisc_mask(&itr->fltr_info); + + /* Skip if filter is not completely specified by given mask */ + if (fltr_promisc_mask & ~promisc_mask) + continue; + + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, + &remove_list_head, + &itr->fltr_info); + if (status) { + mutex_unlock(rule_lock); + goto free_fltr_list; + } + } + mutex_unlock(rule_lock); + + status = ice_remove_promisc(hw, recipe_id, &remove_list_head); + +free_fltr_list: + list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { + list_del(&fm_entry->list_entry); + devm_kfree(ice_hw_to_dev(hw), fm_entry); + } + + return status; +} + +/** + * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @vid: VLAN ID to set VLAN promiscuous + */ +enum ice_status +ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) +{ + enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; + struct ice_fltr_list_entry f_list_entry; + struct ice_fltr_info new_fltr; + enum ice_status status = 0; + bool is_tx_fltr; + u16 hw_vsi_id; + int pkt_type; + u8 recipe_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + memset(&new_fltr, 0, sizeof(new_fltr)); + + if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) { + new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN; + new_fltr.l_data.mac_vlan.vlan_id = vid; + recipe_id = ICE_SW_LKUP_PROMISC_VLAN; + } else { + new_fltr.lkup_type = ICE_SW_LKUP_PROMISC; + recipe_id = ICE_SW_LKUP_PROMISC; + } + + /* Separate filters must be set for each direction/packet type + * combination, so we will loop over the mask value, store the + * individual type, and clear it out in the input mask as it + * is found. 
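/*
 * Illustrative sketch (not part of the patch): ice_determine_promisc_mask()
 * above as a user-space classifier. Bit values mirror the enum added to
 * ice_switch.h later in this patch; the address class is passed in rather
 * than derived from MAC bytes, and all names are invented:
 */
#include <stdbool.h>

#define P_UCAST_RX 0x01
#define P_UCAST_TX 0x02
#define P_MCAST_RX 0x04
#define P_MCAST_TX 0x08
#define P_BCAST_RX 0x10
#define P_BCAST_TX 0x20
#define P_VLAN_RX  0x40
#define P_VLAN_TX  0x80

enum addr_class { ADDR_UCAST, ADDR_MCAST, ADDR_BCAST };

static unsigned char classify_promisc(enum addr_class c, bool is_tx,
				      unsigned short vid)
{
	unsigned char mask = 0;

	switch (c) {
	case ADDR_BCAST: mask |= is_tx ? P_BCAST_TX : P_BCAST_RX; break;
	case ADDR_MCAST: mask |= is_tx ? P_MCAST_TX : P_MCAST_RX; break;
	case ADDR_UCAST: mask |= is_tx ? P_UCAST_TX : P_UCAST_RX; break;
	}

	if (vid)	/* a VLAN ID adds the direction's VLAN bit on top */
		mask |= is_tx ? P_VLAN_TX : P_VLAN_RX;

	return mask;
}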
+ */ + while (promisc_mask) { + u8 *mac_addr; + + pkt_type = 0; + is_tx_fltr = false; + + if (promisc_mask & ICE_PROMISC_UCAST_RX) { + promisc_mask &= ~ICE_PROMISC_UCAST_RX; + pkt_type = UCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_UCAST_TX) { + promisc_mask &= ~ICE_PROMISC_UCAST_TX; + pkt_type = UCAST_FLTR; + is_tx_fltr = true; + } else if (promisc_mask & ICE_PROMISC_MCAST_RX) { + promisc_mask &= ~ICE_PROMISC_MCAST_RX; + pkt_type = MCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_MCAST_TX) { + promisc_mask &= ~ICE_PROMISC_MCAST_TX; + pkt_type = MCAST_FLTR; + is_tx_fltr = true; + } else if (promisc_mask & ICE_PROMISC_BCAST_RX) { + promisc_mask &= ~ICE_PROMISC_BCAST_RX; + pkt_type = BCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_BCAST_TX) { + promisc_mask &= ~ICE_PROMISC_BCAST_TX; + pkt_type = BCAST_FLTR; + is_tx_fltr = true; + } + + /* Check for VLAN promiscuous flag */ + if (promisc_mask & ICE_PROMISC_VLAN_RX) { + promisc_mask &= ~ICE_PROMISC_VLAN_RX; + } else if (promisc_mask & ICE_PROMISC_VLAN_TX) { + promisc_mask &= ~ICE_PROMISC_VLAN_TX; + is_tx_fltr = true; + } + + /* Set filter DA based on packet type */ + mac_addr = new_fltr.l_data.mac.mac_addr; + if (pkt_type == BCAST_FLTR) { + eth_broadcast_addr(mac_addr); + } else if (pkt_type == MCAST_FLTR || + pkt_type == UCAST_FLTR) { + /* Use the dummy ether header DA */ + ether_addr_copy(mac_addr, dummy_eth_header); + if (pkt_type == MCAST_FLTR) + mac_addr[0] |= 0x1; /* Set multicast bit */ + } + + /* Need to reset this to zero for all iterations */ + new_fltr.flag = 0; + if (is_tx_fltr) { + new_fltr.flag |= ICE_FLTR_TX; + new_fltr.src = hw_vsi_id; + } else { + new_fltr.flag |= ICE_FLTR_RX; + new_fltr.src = hw->port_info->lport; + } + + new_fltr.fltr_act = ICE_FWD_TO_VSI; + new_fltr.vsi_handle = vsi_handle; + new_fltr.fwd_id.hw_vsi_id = hw_vsi_id; + f_list_entry.fltr_info = new_fltr; + + status = ice_add_rule_internal(hw, recipe_id, &f_list_entry); + if (status) + goto set_promisc_exit; + } + +set_promisc_exit: + return status; +} + +/** + * ice_set_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @rm_vlan_promisc: Clear VLANs VSI promisc mode + * + * Configure VSI with all associated VLANs to given promiscuous mode(s) + */ +enum ice_status +ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + bool rm_vlan_promisc) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_list_entry *list_itr, *tmp; + struct list_head vsi_list_head; + struct list_head *vlan_head; + struct mutex *vlan_lock; /* Lock to protect filter rule list */ + enum ice_status status; + u16 vlan_id; + + INIT_LIST_HEAD(&vsi_list_head); + vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; + vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; + mutex_lock(vlan_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head, + &vsi_list_head); + mutex_unlock(vlan_lock); + if (status) + goto free_fltr_list; + + list_for_each_entry(list_itr, &vsi_list_head, list_entry) { + vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; + if (rm_vlan_promisc) + status = ice_clear_vsi_promisc(hw, vsi_handle, + promisc_mask, vlan_id); + else + status = ice_set_vsi_promisc(hw, vsi_handle, + promisc_mask, vlan_id); + if (status) + break; + } + +free_fltr_list: + list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) { + list_del(&list_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), list_itr); + } + return 
status; +} + +/** * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to remove filters from @@ -2224,12 +2531,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, case ICE_SW_LKUP_VLAN: ice_remove_vlan(hw, &remove_list_head); break; + case ICE_SW_LKUP_PROMISC: + case ICE_SW_LKUP_PROMISC_VLAN: + ice_remove_promisc(hw, lkup, &remove_list_head); + break; case ICE_SW_LKUP_MAC_VLAN: case ICE_SW_LKUP_ETHERTYPE: case ICE_SW_LKUP_ETHERTYPE_MAC: - case ICE_SW_LKUP_PROMISC: case ICE_SW_LKUP_DFLT: - case ICE_SW_LKUP_PROMISC_VLAN: case ICE_SW_LKUP_LAST: default: ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index d5ef0bd58bf9..e4ce0720b871 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -178,6 +178,17 @@ struct ice_fltr_mgmt_list_entry { u8 counter_index; }; +enum ice_promisc_flags { + ICE_PROMISC_UCAST_RX = 0x1, + ICE_PROMISC_UCAST_TX = 0x2, + ICE_PROMISC_MCAST_RX = 0x4, + ICE_PROMISC_MCAST_TX = 0x8, + ICE_PROMISC_BCAST_RX = 0x10, + ICE_PROMISC_BCAST_TX = 0x20, + ICE_PROMISC_VLAN_RX = 0x40, + ICE_PROMISC_VLAN_TX = 0x80, +}; + /* VSI related commands */ enum ice_status ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, @@ -199,10 +210,22 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); +enum ice_status +ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); + +/* Promisc/defport setup for VSIs */ enum ice_status ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); +enum ice_status +ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +enum ice_status +ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +enum ice_status +ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + bool rm_vlan_promisc); enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index c289d97f477d..f2462799154a 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -100,8 +100,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring) * * Returns true if there's any budget left (e.g. 
the clean is finished) */ -static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, - int napi_budget) +static bool +ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) { unsigned int total_bytes = 0, total_pkts = 0; unsigned int budget = vsi->work_lmt; @@ -236,9 +236,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) if (!tx_ring->tx_buf) return -ENOMEM; - /* round up to nearest 4K */ + /* round up to nearest page */ tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), - 4096); + PAGE_SIZE); tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { @@ -282,8 +282,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) if (!rx_buf->page) continue; - dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE); - __free_pages(rx_buf->page, 0); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(dev, rx_buf->dma, + rx_buf->page_offset, + ICE_RXBUF_2048, DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); rx_buf->page = NULL; rx_buf->page_offset = 0; @@ -339,9 +348,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) if (!rx_ring->rx_buf) return -ENOMEM; - /* round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); + /* round up to nearest page */ + rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), + PAGE_SIZE); rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { @@ -389,8 +398,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) * Returns true if the page was successfully allocated or * reused. */ -static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, - struct ice_rx_buf *bi) +static bool +ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) { struct page *page = bi->page; dma_addr_t dma; @@ -409,7 +418,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use @@ -423,6 +433,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = 0; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; return true; } @@ -452,6 +464,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) if (!ice_alloc_mapped_page(rx_ring, bi)) goto no_bufs; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + ICE_RXBUF_2048, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
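/*
 * Illustrative sketch (not part of the patch): the pagecnt_bias scheme
 * introduced above front-loads USHRT_MAX page references so the hot path
 * only decrements a local counter; the driver is sole owner of the page
 * only while the kernel refcount minus the bias is at most one. A
 * user-space model of the bookkeeping, names invented:
 */
#include <limits.h>
#include <stdbool.h>

struct rx_buf_model {
	unsigned int page_refs;		/* models page_count(page) */
	unsigned short pagecnt_bias;	/* references still owned by driver */
};

static void buf_init(struct rx_buf_model *b)
{
	b->page_refs = 1 + (USHRT_MAX - 1);	/* alloc + page_ref_add() */
	b->pagecnt_bias = USHRT_MAX;
}

static void buf_give_to_stack(struct rx_buf_model *b)
{
	b->pagecnt_bias--;	/* one reference now travels with the skb */
}

static bool buf_is_sole_owner(const struct rx_buf_model *b)
{
	/* mirrors: (page_count(page) - pagecnt_bias) > 1 means shared */
	return (b->page_refs - b->pagecnt_bias) <= 1;
}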
*/ @@ -497,61 +515,43 @@ static bool ice_page_is_reserved(struct page *page) } /** - * ice_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_buf: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware - * @skb: sk_buf to place the data into + * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse + * @rx_buf: Rx buffer to adjust + * @size: Size of adjustment * - * This function will add the data contained in rx_buf->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. - * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. + * Update the offset within page so that Rx buf will be ready to be reused. + * For systems with PAGE_SIZE < 8192 this function will flip the page offset + * so the second half of page assigned to Rx buffer will be used, otherwise + * the offset is moved by the @size bytes */ -static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb) +static void +ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) { #if (PAGE_SIZE < 8192) - unsigned int truesize = ICE_RXBUF_2048; + /* flip page offset to other buffer */ + rx_buf->page_offset ^= size; #else - unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; - unsigned int truesize; -#endif /* PAGE_SIZE < 8192) */ - - struct page *page; - unsigned int size; - - size = le16_to_cpu(rx_desc->wb.pkt_len) & - ICE_RX_FLX_DESC_PKT_LEN_M; - - page = rx_buf->page; + /* move offset up to the next cache line */ + rx_buf->page_offset += size; +#endif +} +/** + * ice_can_reuse_rx_page - Determine if page can be reused for another Rx + * @rx_buf: buffer containing the page + * + * If page is reusable, we have a green light for calling ice_reuse_rx_page, + * which will assign the current buffer to the buffer that next_to_alloc is + * pointing to; otherwise, the DMA mapping needs to be destroyed and + * page freed + */ +static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) +{ #if (PAGE_SIZE >= 8192) - truesize = ALIGN(size, L1_CACHE_BYTES); -#endif /* PAGE_SIZE >= 8192) */ - - /* will the data fit in the skb we allocated? 
if so, just - * copy it as it is pretty small anyway - */ - if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buf->page_offset; - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!ice_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - __free_pages(page, 0); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buf->page_offset, size, truesize); + unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; +#endif + unsigned int pagecnt_bias = rx_buf->pagecnt_bias; + struct page *page = rx_buf->page; /* avoid re-using remote pages */ if (unlikely(ice_page_is_reserved(page))) @@ -559,36 +559,61 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) + if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buf->page_offset ^= truesize; #else - /* move offset up to the next cache line */ - rx_buf->page_offset += truesize; - if (rx_buf->page_offset > last_offset) return false; #endif /* PAGE_SIZE < 8192) */ - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. */ - get_page(rx_buf->page); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buf->pagecnt_bias = USHRT_MAX; + } return true; } /** + * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag + * @rx_buf: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: packet length from rx_desc + * + * This function will add the data contained in rx_buf->page to the skb. + * It will just attach the page as a frag to the skb. + * The function will then update the page offset. + */ +static void +ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE >= 8192) + unsigned int truesize = SKB_DATA_ALIGN(size); +#else + unsigned int truesize = ICE_RXBUF_2048; +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + rx_buf->page_offset, size, truesize); + + /* page is being used so we must update the page offset */ + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); +} + +/** * ice_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: Rx descriptor ring to store buffers on * @old_buf: donor buffer to have page reused * * Synchronizes page for reuse by the adapter */ -static void ice_reuse_rx_page(struct ice_ring *rx_ring, - struct ice_rx_buf *old_buf) +static void +ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) { u16 nta = rx_ring->next_to_alloc; struct ice_rx_buf *new_buf; @@ -599,121 +624,132 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring, nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - /* transfer page from old buffer to new buffer */ - *new_buf = *old_buf; + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. 
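/*
 * Illustrative sketch (not part of the patch): on 4K-page systems the ring
 * splits each page into two 2048-byte halves and ping-pongs between them by
 * XOR-ing the offset, exactly as ice_rx_buf_adjust_pg_offset() does above;
 * the constant stands in for ICE_RXBUF_2048:
 */
#include <assert.h>

#define HALF_PAGE 2048

static unsigned int flip_offset(unsigned int page_offset)
{
	return page_offset ^ HALF_PAGE;	/* 0 <-> 2048 */
}

int main(void)
{
	assert(flip_offset(0) == HALF_PAGE);
	assert(flip_offset(HALF_PAGE) == 0);	/* two flips: back to start */
	return 0;
}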
+ */ + new_buf->dma = old_buf->dma; + new_buf->page = old_buf->page; + new_buf->page_offset = old_buf->page_offset; + new_buf->pagecnt_bias = old_buf->pagecnt_bias; } /** - * ice_fetch_rx_buf - Allocate skb and populate it + * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use * @rx_ring: Rx descriptor ring to transact packets on - * @rx_desc: descriptor containing info written by hardware + * @skb: skb to be used + * @size: size of buffer to add to skb * - * This function allocates an skb on the fly, and populates it with the page - * data from the current receive descriptor, taking care to set up the skb - * correctly, as well as handling calling the page recycle function if - * necessary. + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. */ -static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc) +static struct ice_rx_buf * +ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, + const unsigned int size) { struct ice_rx_buf *rx_buf; - struct sk_buff *skb; - struct page *page; rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; - page = rx_buf->page; - prefetchw(page); + prefetchw(rx_buf->page); + *skb = rx_buf->skb; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, + rx_buf->page_offset, size, + DMA_FROM_DEVICE); - skb = rx_buf->skb; + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buf->pagecnt_bias--; - if (likely(!skb)) { - u8 *page_addr = page_address(page) + rx_buf->page_offset; + return rx_buf; +} - /* prefetch first cache line of first page */ - prefetch(page_addr); +/** + * ice_construct_skb - Allocate skb and populate it + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buf: Rx buffer to pull data from + * @size: the length of the packet + * + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. 
+ */ +static struct sk_buff * +ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + unsigned int size) +{ + void *va = page_address(rx_buf->page) + rx_buf->page_offset; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch((void *)(page_addr + L1_CACHE_BYTES)); + prefetch((u8 *)va + L1_CACHE_BYTES); #endif /* L1_CACHE_BYTES */ - /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - ICE_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_buf_failed++; - return NULL; - } - - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; - skb_record_rx_queue(skb, rx_ring->q_index); - } else { - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, - rx_buf->page_offset, - ICE_RXBUF_2048, - DMA_FROM_DEVICE); + skb_record_rx_queue(skb, rx_ring->q_index); + /* Determine available headroom for copy */ + headlen = size; + if (headlen > ICE_RX_HDR_SIZE) + headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE); - rx_buf->skb = NULL; - } + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - /* pull page into skb */ - if (ice_add_rx_frag(rx_buf, rx_desc, skb)) { - /* hand second half of page back to the ring */ - ice_reuse_rx_page(rx_ring, rx_buf); - rx_ring->rx_stats.page_reuse_count++; + /* if we exhaust the linear part then add what is left as a frag */ + size -= headlen; + if (size) { +#if (PAGE_SIZE >= 8192) + unsigned int truesize = SKB_DATA_ALIGN(size); +#else + unsigned int truesize = ICE_RXBUF_2048; +#endif + skb_add_rx_frag(skb, 0, rx_buf->page, + rx_buf->page_offset + headlen, size, truesize); + /* buffer is used by skb, update page_offset */ + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE, - DMA_FROM_DEVICE); + /* buffer is unused, reset bias back to rx_buf; data was copied + * onto skb's linear part so there's no need for adjusting + * page offset and we can reuse this buffer as-is + */ + rx_buf->pagecnt_bias++; } - /* clear contents of buffer_info */ - rx_buf->page = NULL; - return skb; } /** - * ice_pull_tail - ice specific version of skb_pull_tail - * @skb: pointer to current skb being adjusted + * ice_put_rx_buf - Clean up used buffer and either recycle or free + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buf: Rx buffer to pull data from * - * This function is an ice specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. + * This function will clean up the contents of the rx_buf. It will + * either recycle the buffer or unmap it and free the associated resources. 
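/*
 * Illustrative sketch (not part of the patch): ice_construct_skb() above
 * copies at most a fixed header budget into the skb's linear area and hangs
 * the remainder off the page as a fragment (the driver refines the copy
 * length with eth_get_headlen()). The split computation alone, with
 * invented names:
 */
struct linear_frag_split {
	unsigned int headlen;	/* bytes copied into the linear part */
	unsigned int frag_len;	/* bytes left behind in the page */
};

static struct linear_frag_split split_rx_buf(unsigned int size,
					     unsigned int hdr_budget)
{
	struct linear_frag_split s;

	s.headlen = size > hdr_budget ? hdr_budget : size;
	s.frag_len = size - s.headlen;	/* 0: page half can be reused as-is */

	return s;
}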
*/ -static void ice_pull_tail(struct sk_buff *skb) +static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int pull_len; - unsigned char *va; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + /* hand second half of page back to the ring */ + if (ice_can_reuse_rx_page(rx_buf)) { + ice_reuse_rx_page(rx_ring, rx_buf); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); + } - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; + /* clear contents of buffer_info */ + rx_buf->page = NULL; + rx_buf->skb = NULL; } /** @@ -730,10 +766,6 @@ static void ice_pull_tail(struct sk_buff *skb) */ static bool ice_cleanup_headers(struct sk_buff *skb) { - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ice_pull_tail(skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -751,8 +783,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb) * The status_error_len doesn't need to be shifted because it begins * at offset zero. */ -static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, - const u16 stat_err_bits) +static bool +ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) { return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits)); @@ -769,9 +801,9 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. */ -static bool ice_is_non_eop(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb) +static bool +ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) { u32 ntc = rx_ring->next_to_clean + 1; @@ -838,8 +870,9 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, * * skb->protocol must be set before this function is called */ -static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, - union ice_32b_rx_flex_desc *rx_desc, u8 ptype) +static void +ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, + union ice_32b_rx_flex_desc *rx_desc, u8 ptype) { struct ice_rx_ptype_decoded decoded; u32 rx_error, rx_status; @@ -909,9 +942,10 @@ checksum_fail: * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
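/*
 * Illustrative sketch (not part of the patch): ice_put_rx_buf() above is a
 * two-way branch -- recycle the page for the ring, or unmap it and drain
 * the outstanding bias references. The decision reduced to a model with
 * invented names:
 */
#include <stdbool.h>

enum buf_fate { BUF_RECYCLED, BUF_FREED };

static enum buf_fate put_rx_buf(bool page_reusable, unsigned int *reuse_cnt)
{
	if (page_reusable) {
		(*reuse_cnt)++;		/* models rx_stats.page_reuse_count++ */
		return BUF_RECYCLED;	/* parked at next_to_alloc for refill */
	}

	return BUF_FREED;	/* dma_unmap_page_attrs + drain bias refs */
}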
*/ -static void ice_process_skb_fields(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u8 ptype) +static void +ice_process_skb_fields(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 ptype) { ice_rx_hash(rx_ring, rx_desc, skb, ptype); @@ -930,13 +964,12 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring, * This function sends the completed packet (via. skb) up the stack using * gro receive functions (with/without vlan tag) */ -static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, - u16 vlan_tag) +static void +ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - (vlan_tag & VLAN_VID_MASK)) { + (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - } napi_gro_receive(&rx_ring->q_vector->napi, skb); } @@ -961,7 +994,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) /* start the loop to process RX packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; + struct ice_rx_buf *rx_buf; struct sk_buff *skb; + unsigned int size; u16 stat_err_bits; u16 vlan_tag = 0; u8 rx_ptype; @@ -991,11 +1026,24 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) */ dma_rmb(); + size = le16_to_cpu(rx_desc->wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M; + + rx_buf = ice_get_rx_buf(rx_ring, &skb, size); /* allocate (if needed) and populate skb */ - skb = ice_fetch_rx_buf(rx_ring, rx_desc); - if (!skb) + if (skb) + ice_add_rx_frag(rx_buf, skb, size); + else + skb = ice_construct_skb(rx_ring, rx_buf, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + rx_buf->pagecnt_bias++; break; + } + ice_put_rx_buf(rx_ring, rx_buf); cleaned_count++; /* skip if it is NOP desc */ @@ -1048,18 +1096,257 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) return failure ? budget : (int)total_rx_pkts; } +static unsigned int ice_itr_divisor(struct ice_port_info *pi) +{ + switch (pi->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_40GB: + return ICE_ITR_ADAPTIVE_MIN_INC * 1024; + case ICE_AQ_LINK_SPEED_25GB: + case ICE_AQ_LINK_SPEED_20GB: + return ICE_ITR_ADAPTIVE_MIN_INC * 512; + case ICE_AQ_LINK_SPEED_100MB: + return ICE_ITR_ADAPTIVE_MIN_INC * 32; + default: + return ICE_ITR_ADAPTIVE_MIN_INC * 256; + } +} + +/** + * ice_update_itr - update the adaptive ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + */ +static void +ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) +{ + unsigned int avg_wire_size, packets, bytes, itr; + unsigned long next_update = jiffies; + bool container_is_rx; + + if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) + return; + + /* If itr_countdown is set it means we programmed an ITR within + * the last 4 interrupt cycles. 
This has a side effect of us + * potentially firing an early interrupt. In order to work around + * this we need to throw out any data received for a few + * interrupts following the update. + */ + if (q_vector->itr_countdown) { + itr = rc->target_itr; + goto clear_counts; + } + + container_is_rx = (&q_vector->rx == rc); + /* For Rx we want to push the delay up and default to low latency. + * for Tx we want to pull the delay down and default to high latency. + */ + itr = container_is_rx ? + ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : + ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, rc->next_update)) + goto clear_counts; + + packets = rc->total_pkts; + bytes = rc->total_bytes; + + if (container_is_rx) { + /* If Rx there are 1 to 4 packets and bytes are less than + * 9000 assume insufficient data to use bulk rate limiting + * approach unless Tx is already in bulk rate limiting. We + * are likely latency driven. + */ + if (packets && packets < 4 && bytes < 9000 && + (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { + itr = ICE_ITR_ADAPTIVE_LATENCY; + goto adjust_by_size; + } + } else if (packets < 4) { + /* If we have Tx and Rx ITR maxed and Tx ITR is running in + * bulk mode and we are receiving 4 or fewer packets just + * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so + * that the Rx can relax. + */ + if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & ICE_ITR_MASK) == + ICE_ITR_ADAPTIVE_MAX_USECS) + goto clear_counts; + } else if (packets > 32) { + /* If we have processed over 32 packets in a single interrupt + * for Tx assume we need to switch over to "bulk" mode. + */ + rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; + } + + /* We have no packets to actually measure against. This means + * either one of the other queues on this vector is active or + * we are a Tx queue doing TSO with too high of an interrupt rate. + * + * Between 4 and 56 we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 56) { + itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; + if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { + itr &= ICE_ITR_ADAPTIVE_LATENCY; + itr += ICE_ITR_ADAPTIVE_MAX_USECS; + } + goto clear_counts; + } + + if (packets <= 256) { + itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); + itr &= ICE_ITR_MASK; + + /* Between 56 and 112 is our "goldilocks" zone where we are + * working out "just right". Just report that our current + * ITR is good for us. + */ + if (packets <= 112) + goto clear_counts; + + /* If packet count is 128 or greater we are likely looking + * at a slight overrun of the delay we want. Try halving + * our delay to see if that will cut the number of packets + * in half per interrupt. + */ + itr >>= 1; + itr &= ICE_ITR_MASK; + if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) + itr = ICE_ITR_ADAPTIVE_MIN_USECS; + + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since + * number of packets is greater than 256. 
We are just going to have + * to compute a value and try to bring the count under control, + * though for smaller packet sizes there isn't much we can do as + * NAPI polling will likely be kicking in sooner rather than later. + */ + itr = ICE_ITR_ADAPTIVE_BULK; + +adjust_by_size: + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * gives the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 250k ints/sec */ + avg_wire_size = 4096; + } else if (avg_wire_size <= 380) { + /* 250K ints/sec to 60K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 1696; + } else if (avg_wire_size <= 1084) { + /* 60K ints/sec to 36K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 36K ints/sec to 30K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 30K ints/sec */ + avg_wire_size = 32256; + } + + /* If we are in low latency mode halve our delay which doubles the + * rate to somewhere between 100K to 16K ints/sec + */ + if (itr & ICE_ITR_ADAPTIVE_LATENCY) + avg_wire_size >>= 1; + + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. + */ + itr += DIV_ROUND_UP(avg_wire_size, + ice_itr_divisor(q_vector->vsi->port_info)) * + ICE_ITR_ADAPTIVE_MIN_INC; + + if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { + itr &= ICE_ITR_ADAPTIVE_LATENCY; + itr += ICE_ITR_ADAPTIVE_MAX_USECS; + } + +clear_counts: + /* write back value */ + rc->target_itr = itr; + + /* next update should occur within next jiffy */ + rc->next_update = next_update + 1; + + rc->total_bytes = 0; + rc->total_pkts = 0; +} + /** * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register * @itr_idx: interrupt throttling index - * @reg_itr: interrupt throttling value adjusted based on ITR granularity + * @itr: interrupt throttling value in usecs */ -static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr) +static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) { + /* The itr value is reported in microseconds, and the register value is + * recorded in 2 microsecond units. For this reason we only need to + * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this + * granularity as a shift instead of division. The mask makes sure the + * ITR value is never odd so we don't accidentally write into the field + * prior to the ITR field. 
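/*
 * Illustrative sketch (not part of the patch): the adjust_by_size mapping
 * above as a standalone function. Input is average bytes per packet; the
 * result is the intermediate value that the driver later scales by link
 * speed and shifts down by 8 (the "256 times larger" factor):
 */
static unsigned int itr_from_wire_size(unsigned int avg_wire_size)
{
	if (avg_wire_size <= 60)
		return 4096;				/* ~250K ints/sec floor */
	if (avg_wire_size <= 380)
		return avg_wire_size * 40 + 1696;	/* 250K..60K ints/sec */
	if (avg_wire_size <= 1084)
		return avg_wire_size * 15 + 11452;	/* 60K..36K ints/sec */
	if (avg_wire_size <= 1980)
		return avg_wire_size * 5 + 22420;	/* 36K..30K ints/sec */

	return 32256;					/* plateau at ~30K ints/sec */
}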
+ */ + itr &= ICE_ITR_MASK; + return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | - (reg_itr << GLINT_DYN_CTL_INTERVAL_S); + (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); } +/* The act of updating the ITR will cause it to immediately trigger. In order + * to prevent this from throwing off adaptive update statistics we defer the + * update so that it can only happen so often. So after either Tx or Rx are + * updated we make the adaptive scheme wait until either the ITR completely + * expires via the next_update expiration or we have been through at least + * 3 interrupts. + */ +#define ITR_COUNTDOWN_START 3 + /** * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt * @vsi: the VSI associated with the q_vector @@ -1068,10 +1355,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr) static void ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { - struct ice_hw *hw = &vsi->back->hw; - struct ice_ring_container *rc; + struct ice_ring_container *tx = &q_vector->tx; + struct ice_ring_container *rx = &q_vector->rx; u32 itr_val; + /* This will do nothing if dynamic updates are not enabled */ + ice_update_itr(q_vector, tx); + ice_update_itr(q_vector, rx); + /* This block of logic allows us to get away with only updating * one ITR value with each interrupt. The idea is to perform a * pseudo-lazy update with the following criteria. @@ -1080,35 +1371,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) * 2. If we must reduce an ITR that is given highest priority. * 3. We then give priority to increasing ITR based on amount. */ - if (q_vector->rx.target_itr < q_vector->rx.current_itr) { - rc = &q_vector->rx; + if (rx->target_itr < rx->current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; - } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || - ((q_vector->rx.target_itr - q_vector->rx.current_itr) < - (q_vector->tx.target_itr - q_vector->tx.current_itr))) { - rc = &q_vector->tx; + itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); + rx->current_itr = rx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if ((tx->target_itr < tx->current_itr) || + ((rx->target_itr - rx->current_itr) < + (tx->target_itr - tx->current_itr))) { /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; - } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { - rc = &q_vector->rx; + itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); + tx->current_itr = tx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if (rx->current_itr != rx->target_itr) { /* Rx ITR needs to be increased, third priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; + itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); + rx->current_itr = rx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* Still have to re-enable the interrupts */ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + if (q_vector->itr_countdown) + q_vector->itr_countdown--; } - if (!test_bit(__ICE_DOWN, vsi->state)) { - int vector = vsi->hw_base_vector + q_vector->v_idx; - - wr32(hw, GLINT_DYN_CTL(vector), itr_val); - } + if (!test_bit(__ICE_DOWN, vsi->state)) 
+ wr32(&vsi->back->hw, + GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx), + itr_val); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index fc358ea81816..60131b84b021 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -47,6 +47,9 @@ #define ICE_TX_FLAGS_VLAN_M 0xffff0000 #define ICE_TX_FLAGS_VLAN_S 16 +#define ICE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + struct ice_tx_buf { struct ice_tx_desc *next_to_watch; struct sk_buff *skb; @@ -73,6 +76,7 @@ struct ice_rx_buf { dma_addr_t dma; struct page *page; unsigned int page_offset; + u16 pagecnt_bias; }; struct ice_q_stats { @@ -124,10 +128,17 @@ enum ice_rx_dtype { #define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */ #define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC)) #define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC) -#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */ +#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */ +#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S) #define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */ #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK) +#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002 +#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA +#define ICE_ITR_ADAPTIVE_LATENCY 0x8000 +#define ICE_ITR_ADAPTIVE_BULK 0x0000 + #define ICE_DFLT_INTRL 0 /* Legacy or Advanced Mode Queue */ @@ -173,21 +184,13 @@ struct ice_ring { u16 next_to_alloc; } ____cacheline_internodealigned_in_smp; -enum ice_latency_range { - ICE_LOWEST_LATENCY = 0, - ICE_LOW_LATENCY = 1, - ICE_BULK_LATENCY = 2, - ICE_ULTRA_LATENCY = 3, -}; - struct ice_ring_container { /* head of linked-list of rings */ struct ice_ring *ring; unsigned long next_update; /* jiffies value of next queue update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_pkts; /* total packets processed this int */ - enum ice_latency_range latency_range; - int itr_idx; /* index in the interrupt vector */ + u16 itr_idx; /* index in the interrupt vector */ u16 target_itr; /* value in usecs divided by the hw->itr_gran */ u16 current_itr; /* value in usecs divided by the hw->itr_gran */ /* high bit set means dynamic ITR, rest is used to store user diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 17086d5b5c33..3a4e67484487 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_INIT BIT_ULL(1) #define ICE_DBG_LINK BIT_ULL(4) +#define ICE_DBG_PHY BIT_ULL(5) #define ICE_DBG_QCTX BIT_ULL(6) #define ICE_DBG_NVM BIT_ULL(7) #define ICE_DBG_LAN BIT_ULL(8) @@ -209,6 +210,9 @@ struct ice_nvm_info { #define ICE_MAX_TRAFFIC_CLASS 8 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS +#define ice_for_each_traffic_class(_i) \ + for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++) + struct ice_sched_node { struct ice_sched_node *parent; struct ice_sched_node *sibling; /* next sibling in the same layer */ @@ -247,7 +251,6 @@ struct ice_sched_vsi_info { struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; struct list_head list_entry; u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; - u16 vsi_id; }; /* driver defines the policy */ diff --git 
a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 57155b4a59dc..84e51a0a0795 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -5,6 +5,37 @@ #include "ice_lib.h" /** + * ice_err_to_virt_err - translate errors for VF return code + * @ice_err: error return code + */ +static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) +{ + switch (ice_err) { + case ICE_SUCCESS: + return VIRTCHNL_STATUS_SUCCESS; + case ICE_ERR_BAD_PTR: + case ICE_ERR_INVAL_SIZE: + case ICE_ERR_DEVICE_NOT_SUPPORTED: + case ICE_ERR_PARAM: + case ICE_ERR_CFG: + return VIRTCHNL_STATUS_ERR_PARAM; + case ICE_ERR_NO_MEMORY: + return VIRTCHNL_STATUS_ERR_NO_MEMORY; + case ICE_ERR_NOT_READY: + case ICE_ERR_RESET_FAILED: + case ICE_ERR_FW_API_VER: + case ICE_ERR_AQ_ERROR: + case ICE_ERR_AQ_TIMEOUT: + case ICE_ERR_AQ_FULL: + case ICE_ERR_AQ_NO_WORK: + case ICE_ERR_AQ_EMPTY: + return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + default: + return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + } +} + +/** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure * @v_opcode: operation code @@ -14,7 +45,7 @@ */ static void ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, - enum ice_status v_retval, u8 *msg, u16 msglen) + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { struct ice_hw *hw = &pf->hw; struct ice_vf *vf = pf->vf; @@ -104,7 +135,8 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & ICE_AQ_LINK_UP); - ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -343,11 +375,41 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) } /** - * ice_vsi_set_pvid - Set port VLAN id for the VSI - * @vsi: the VSI being changed + * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for adding a PVID + * @ctxt: the VSI ctxt to fill * @vid: the VLAN id to set as a PVID */ -static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) +static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid) +{ + ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED | + ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_EMOD_STR); + ctxt->info.pvid = cpu_to_le16(vid); + ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); +} + +/** + * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctxt for removing a PVID + * @ctxt: the VSI ctxt to fill + */ +static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt) +{ + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; + ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); +} + +/** + * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI + * @vsi: the VSI to update + * @vid: the VLAN id to set as a PVID + * @enable: true to enable the PVID, false to disable it + */ +static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) { struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; @@ -359,46 +421,27 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) if (!ctxt) return -ENOMEM; - ctxt->info.vlan_flags =
(ICE_AQ_VSI_VLAN_MODE_UNTAGGED | - ICE_AQ_VSI_PVLAN_INSERT_PVID | - ICE_AQ_VSI_VLAN_EMOD_STR); - ctxt->info.pvid = cpu_to_le16(vid); - ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + ctxt->info = vsi->info; + if (enable) + ice_vsi_set_pvid_fill_ctxt(ctxt, vid); + else + ice_vsi_kill_pvid_fill_ctxt(ctxt); status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", + dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); ret = -EIO; goto out; } - vsi->info.pvid = ctxt->info.pvid; - vsi->info.vlan_flags = ctxt->info.vlan_flags; + vsi->info = ctxt->info; out: devm_kfree(dev, ctxt); return ret; } /** - * ice_vsi_kill_pvid - Remove port VLAN id from the VSI - * @vsi: the VSI being changed - */ -static int ice_vsi_kill_pvid(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - - if (ice_vsi_manage_vlan_stripping(vsi, false)) { - dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n", - vsi->vsi_num); - return -ENODEV; - } - - vsi->info.pvid = 0; - return 0; -} - -/** * ice_vf_vsi_setup - Set up a VF VSI * @pf: board private structure * @pi: pointer to the port_info instance @@ -446,8 +489,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) vsi->hw_base_vector += 1; /* Check if port VLAN exist before, and restore it accordingly */ - if (vf->port_vlan_id) - ice_vsi_set_pvid(vsi, vf->port_vlan_id); + if (vf->port_vlan_id) { + ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true); + ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M); + } eth_broadcast_addr(broadcast); @@ -484,6 +529,8 @@ ice_alloc_vsi_res_exit: */ static int ice_alloc_vf_res(struct ice_vf *vf) { + struct ice_pf *pf = vf->pf; + int tx_rx_queue_left; int status; /* setup VF VSI and necessary resources */ @@ -491,6 +538,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf) if (status) goto ice_alloc_vf_res_exit; + /* Update number of VF queues, in case VF had requested for queue + * changes + */ + tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left += ICE_DFLT_QS_PER_VF; + if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && + vf->num_req_qs != vf->num_vf_qs) + vf->num_vf_qs = vf->num_req_qs; + if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else @@ -548,6 +604,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) wr32(hw, GLINT_VECT2FUNC(v), reg); } + /* Map mailbox interrupt. We put an explicit 0 here to remind us that + * VF admin queue interrupts will go to VF MSI-X vector 0. 
+ */ + wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0); /* set regardless of mapping mode */ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); @@ -750,6 +810,47 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) } /** + * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s) + * @vf: pointer to the VF info + * @vsi: the VSI being configured + * @promisc_m: mask of promiscuous config bits + * @rm_promisc: promisc flag request from the VF to remove or add filter + * + * This function configures VF VSI promiscuous mode, based on the VF requests, + * for Unicast, Multicast and VLAN + */ +static enum ice_status +ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, + bool rm_promisc) +{ + struct ice_pf *pf = vf->pf; + enum ice_status status = 0; + struct ice_hw *hw; + + hw = &pf->hw; + if (vf->num_vlan) { + status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, + rm_promisc); + } else if (vf->port_vlan_id) { + if (rm_promisc) + status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_id); + else + status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_id); + } else { + if (rm_promisc) + status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + else + status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + } + + return status; +} + +/** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * @is_vflr: true if VFLR was issued, false if not @@ -764,6 +865,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) { struct ice_hw *hw = &pf->hw; + struct ice_vf *vf; int v, i; /* If we don't have any VFs, then there is nothing to reset */ @@ -778,12 +880,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) for (v = 0; v < pf->num_alloc_vfs; v++) ice_trigger_vf_reset(&pf->vf[v], is_vflr); - /* Call Disable LAN Tx queue AQ call with VFR bit set and 0 - * queues to inform Firmware about VF reset. - */ - for (v = 0; v < pf->num_alloc_vfs; v++) - ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL, - ICE_VF_RESET, v, NULL); + for (v = 0; v < pf->num_alloc_vfs; v++) { + struct ice_vsi *vsi; + + vf = &pf->vf[v]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); + ice_vsi_stop_rx_rings(vsi); + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + } + } /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -796,9 +903,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) /* Check each VF in sequence */ while (v < pf->num_alloc_vfs) { - struct ice_vf *vf = &pf->vf[v]; u32 reg; + vf = &pf->vf[v]; reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); if (!(reg & VPGEN_VFRSTAT_VFRD_M)) break; @@ -818,8 +925,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) usleep_range(10000, 20000); /* free VF resources to begin resetting the VSI state */ - for (v = 0; v < pf->num_alloc_vfs; v++) - ice_free_vf_res(&pf->vf[v]); + for (v = 0; v < pf->num_alloc_vfs; v++) { + vf = &pf->vf[v]; + + ice_free_vf_res(vf); + + /* Free VF queues as well, and reallocate later. + * If a given VF has different number of queues + * configured, the request for update will come + * via mailbox communication. 
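ice_vf_set_vsi_promisc() above dispatches on the VF's VLAN situation: VLAN filters present, a port VLAN, or neither. The same mask-selection logic, reduced to a compilable toy; the bit values are invented for illustration, the real masks being the ICE_PROMISC_* bits:

#include <stdio.h>

#define PROMISC_UCAST 0x1
#define PROMISC_MCAST 0x2
#define PROMISC_VLAN  0x4

/* Pick the promiscuous mask the way the helper does: the VLAN bit is
 * only meaningful when the VF actually has VLANs or a port VLAN. */
static unsigned char pick_promisc_mask(int num_vlan, int port_vlan_id)
{
    unsigned char m = PROMISC_UCAST | PROMISC_MCAST;

    if (num_vlan || port_vlan_id)
        m |= PROMISC_VLAN;
    return m;
}

int main(void)
{
    printf("0x%x\n", pick_promisc_mask(2, 0)); /* 0x7: VLAN-aware mask */
    printf("0x%x\n", pick_promisc_mask(0, 0)); /* 0x3: plain mask */
    return 0;
}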
+ */ + vf->num_vf_qs = 0; + } if (ice_check_avail_res(pf)) { dev_err(&pf->pdev->dev, @@ -828,8 +945,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) } /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) - ice_cleanup_and_realloc_vf(&pf->vf[v]); + for (v = 0; v < pf->num_alloc_vfs; v++) { + vf = &pf->vf[v]; + + vf->num_vf_qs = pf->num_vf_qps; + dev_dbg(&pf->pdev->dev, + "VF-id %d has %d queues configured\n", + vf->vf_id, vf->num_vf_qs); + ice_cleanup_and_realloc_vf(vf); + } ice_flush(hw); clear_bit(__ICE_VF_DIS, pf->state); @@ -847,9 +971,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) { struct ice_pf *pf = vf->pf; - struct ice_hw *hw = &pf->hw; struct ice_vsi *vsi; + struct ice_hw *hw; bool rsd = false; + u8 promisc_m; u32 reg; int i; @@ -875,6 +1000,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) vf->vf_id, NULL); } + hw = &pf->hw; /* poll VPGEN_VFRSTAT reg to make sure * that reset is complete */ @@ -900,6 +1026,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) usleep_range(10000, 20000); + /* disable promiscuous modes in case they were enabled + * ignore any error if disabling process failed + */ + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { + if (vf->port_vlan_id || vf->num_vlan) + promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_UCAST_PROMISC_BITS; + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) + dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n"); + } + /* free VF resources to begin resetting the VSI state */ ice_free_vf_res(vf); @@ -938,7 +1079,7 @@ void ice_vc_notify_reset(struct ice_pf *pf) pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS, + ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } @@ -961,8 +1102,9 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf) pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, - (u8 *)&pfe, sizeof(pfe), NULL); + ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), + NULL); } /** @@ -1012,7 +1154,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) pf->num_alloc_vfs = num_alloc_vfs; /* VF resources get allocated during reset */ - if (!ice_reset_all_vfs(pf, false)) + if (!ice_reset_all_vfs(pf, true)) goto err_unroll_sriov; goto err_unroll_intr; @@ -1182,8 +1324,9 @@ static void ice_vc_dis_vf(struct ice_vf *vf) * * send msg to VF */ -static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, - enum ice_status v_retval, u8 *msg, u16 msglen) +static int +ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { enum ice_status aq_ret; struct ice_pf *pf; @@ -1243,8 +1386,8 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) if (VF_IS_V10(&vf->vf_ver)) info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS, - (u8 *)&info, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&info, sizeof(struct virtchnl_version_info)); } @@ -1257,15 +1400,15 @@ static int 
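Both reset paths here poll VPGEN_VFRSTAT for the reset-done bit a bounded number of times, sleeping between reads, and warn if the device never reports completion. The shape of that bounded poll, sketched standalone; read_vfrstat() is a stand-in for rd32() that fakes completion near the end:

#include <stdbool.h>
#include <stdio.h>

#define VFRD_DONE 0x1

/* Stand-in for a device register read; a real driver would use rd32(). */
static unsigned int read_vfrstat(int tries_left)
{
    return tries_left <= 2 ? VFRD_DONE : 0; /* "completes" near the end */
}

/* Bounded poll: give the hardware a fixed number of chances, then give up. */
static bool wait_for_vf_reset(int max_tries)
{
    for (int i = 0; i < max_tries; i++) {
        if (read_vfrstat(max_tries - i) & VFRD_DONE)
            return true;
        /* the driver sleeps here (usleep_range) between reads */
    }
    return false;
}

int main(void)
{
    printf(wait_for_vf_reset(10) ? "reset done\n" : "timed out\n");
    return 0;
}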
ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) */ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_resource *vfres = NULL; - enum ice_status aq_ret = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int len = 0; int ret; if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } @@ -1273,7 +1416,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL); if (!vfres) { - aq_ret = ICE_ERR_NO_MEMORY; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; len = 0; goto err; } @@ -1286,6 +1429,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (!vsi->info.pvid) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; @@ -1336,7 +1484,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) err: /* send the response back to the VF */ - ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret, (u8 *)vfres, len); devm_kfree(&pf->pdev->dev, vfres); @@ -1368,7 +1516,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) { int i; - for (i = 0; i < pf->num_alloc_vsi; i++) + ice_for_each_vsi(pf, i) if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) return pf->vsi[i]; @@ -1416,42 +1564,42 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) */ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi = NULL; - enum ice_status aq_ret; - int ret; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - ret = ice_set_rss(vsi, vrk->key, NULL, 0); - aq_ret = ret ? 
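ice_vc_config_rss_key() shows the control-flow template shared by all the reworked handlers: check every precondition up front, route each failure to a single label with v_ret set, and always send the reply from that label. A stripped-down model of that shape; the key length is illustrative, not the driver's HKEY size:

#include <stdio.h>

#define KEY_LEN 52 /* illustrative */

/* Validate-first, single exit: the shape used by the virtchnl handlers. */
static int config_rss_key(int vf_active, int vsi_ok, int key_len)
{
    int ret = 0;

    if (!vf_active || !vsi_ok || key_len != KEY_LEN) {
        ret = -1; /* analogue of VIRTCHNL_STATUS_ERR_PARAM */
        goto out;
    }
    /* ... program the key here ... */
out:
    /* the real handler always replies to the VF from this point */
    return ret;
}

int main(void)
{
    printf("%d %d\n", config_rss_key(1, 1, KEY_LEN),
           config_rss_key(1, 0, KEY_LEN)); /* 0 -1 */
    return 0;
}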
ICE_ERR_PARAM : ICE_SUCCESS; + if (ice_set_rss(vsi, vrk->key, NULL, 0)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret, NULL, 0); } @@ -1465,40 +1613,40 @@ error_param: static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi = NULL; - enum ice_status aq_ret; - int ret; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE); - aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS; + if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret, NULL, 0); } @@ -1511,25 +1659,26 @@ error_param: */ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_eth_stats stats; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1540,7 +1689,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret, (u8 *)&stats, sizeof(stats)); } @@ -1553,29 +1702,30 @@ error_param: */ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!vqs->rx_queues && !vqs->tx_queues) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, 
vqs->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1584,15 +1734,15 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) * programmed using ice_vsi_cfg_txqs */ if (ice_vsi_start_rx_rings(vsi)) - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; /* Set flag to indicate that queues are enabled */ - if (!aq_ret) + if (v_ret == VIRTCHNL_STATUS_SUCCESS) set_bit(ICE_VF_STATE_ENA, vf->vf_states); error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret, NULL, 0); } @@ -1606,30 +1756,31 @@ error_param: */ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!vqs->rx_queues && !vqs->tx_queues) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1637,23 +1788,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) dev_err(&vsi->back->pdev->dev, "Failed to stop tx rings on VSI %d\n", vsi->vsi_num); - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } if (ice_vsi_stop_rx_rings(vsi)) { dev_err(&vsi->back->pdev->dev, "Failed to stop rx rings on VSI %d\n", vsi->vsi_num); - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } /* Clear enabled queues flag */ - if (!aq_ret) + if (v_ret == VIRTCHNL_STATUS_SUCCESS) clear_bit(ICE_VF_STATE_ENA, vf->vf_states); error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret, NULL, 0); } @@ -1666,18 +1817,18 @@ error_param: */ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_irq_map_info *irqmap_info = (struct virtchnl_irq_map_info *)msg; u16 vsi_id, vsi_q_id, vector_id; struct virtchnl_vector_map *map; struct ice_vsi *vsi = NULL; struct ice_pf *pf = vf->pf; - enum ice_status aq_ret = 0; unsigned long qmap; int i; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1689,13 +1840,13 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* validate msg params */ if (!(vector_id < pf->hw.func_caps.common_cap .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1705,7 +1856,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) struct ice_q_vector *q_vector; if 
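The IRQ-map handler walks each vector's Rx and Tx queue bitmaps with for_each_set_bit(), validating every queue id before touching its q_vector. The same walk in plain C, without the kernel's bitmap helpers:

#include <stdio.h>

/* Visit the set bits of a queue map, as the IRQ-map handler does;
 * a plain shift-and-test loop stands in for for_each_set_bit(). */
static void map_vector_queues(unsigned long qmap, int vector_id)
{
    for (int q = 0; q < (int)(8 * sizeof(qmap)); q++)
        if (qmap & (1UL << q))
            printf("queue %d -> vector %d\n", q, vector_id);
}

int main(void)
{
    map_vector_queues(0x5, 3); /* queues 0 and 2 map to vector 3 */
    return 0;
}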
(!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } q_vector = vsi->q_vectors[i]; @@ -1719,7 +1870,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) struct ice_q_vector *q_vector; if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } q_vector = vsi->q_vectors[i]; @@ -1733,7 +1884,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) ice_vsi_cfg_msix(vsi); error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret, NULL, 0); } @@ -1746,26 +1897,34 @@ error_param: */ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { + dev_err(&pf->pdev->dev, + "VF-%d requesting more than supported number of queues: %d\n", + vf->vf_id, qci->num_queue_pairs); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1775,7 +1934,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != qpi->txq.queue_id || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } /* copy Tx queue info from VF into VSI */ @@ -1785,13 +1944,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[i]->count = qpi->rxq.ring_len; if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi->rx_buf_len = qpi->rxq.databuffer_size; if (qpi->rxq.max_pkt_size >= (16 * 1024) || qpi->rxq.max_pkt_size < 64) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi->max_frame = qpi->rxq.max_pkt_size; @@ -1802,15 +1961,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) */ vsi->num_txq = qci->num_queue_pairs; vsi->num_rxq = qci->num_queue_pairs; + /* All queues of VF VSI are in TC 0 */ + vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs; + vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs; - if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) - aq_ret = 0; - else - aq_ret = ICE_ERR_PARAM; + if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret, NULL, 0); } @@ -1852,11 +2012,11 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf) static int ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool 
set) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; struct ice_pf *pf = vf->pf; enum virtchnl_ops vc_op; - enum ice_status ret; LIST_HEAD(mac_list); struct ice_vsi *vsi; int mac_count = 0; @@ -1869,19 +2029,27 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { - ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } if (set && !ice_is_vf_trusted(vf) && (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { dev_err(&pf->pdev->dev, - "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n"); - ret = ICE_ERR_PARAM; + "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", + vf->vf_id); + /* There is no need to let VF know about not being trusted + * to add more MAC addr, so we can just return success message. + */ + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } for (i = 0; i < al->num_elements; i++) { u8 *maddr = al->list[i].addr; @@ -1893,40 +2061,39 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) * already added. Just continue. */ dev_info(&pf->pdev->dev, - "mac %pM already set for VF %d\n", + "MAC %pM already set for VF %d\n", maddr, vf->vf_id); continue; } else { /* VF can't remove dflt_lan_addr/bcast mac */ dev_err(&pf->pdev->dev, - "can't remove mac %pM for VF %d\n", + "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n", maddr, vf->vf_id); - ret = ICE_ERR_PARAM; - goto handle_mac_exit; + continue; } } /* check for the invalid cases and bail if necessary */ if (is_zero_ether_addr(maddr)) { dev_err(&pf->pdev->dev, - "invalid mac %pM provided for VF %d\n", + "invalid MAC %pM provided for VF %d\n", maddr, vf->vf_id); - ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } if (is_unicast_ether_addr(maddr) && !ice_can_vf_change_mac(vf)) { dev_err(&pf->pdev->dev, - "can't change unicast mac for untrusted VF %d\n", + "can't change unicast MAC for untrusted VF %d\n", vf->vf_id); - ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } /* get here if maddr is multicast or if VF can change mac */ if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { - ret = ICE_ERR_NO_MEMORY; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; goto handle_mac_exit; } mac_count++; @@ -1934,14 +2101,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) /* program the updated filter list */ if (set) - ret = ice_add_mac(&pf->hw, &mac_list); + v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list)); else - ret = ice_remove_mac(&pf->hw, &mac_list); + v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list)); - if (ret) { + if (v_ret) { dev_err(&pf->pdev->dev, - "can't update mac filters for VF %d, error %d\n", - vf->vf_id, ret); + "can't update MAC filters for VF %d, error %d\n", + vf->vf_id, v_ret); } else { if (set) vf->num_mac += mac_count; @@ -1952,7 +2119,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) handle_mac_exit: ice_free_fltr_list(&pf->pdev->dev, &mac_list); /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0); + return 
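The reworked MAC handler distinguishes benign entries (a duplicate on add, the PF-programmed default on delete), which it now skips with continue, from genuinely invalid ones (a zero address, a unicast change by an untrusted VF), which abort the whole request. One way to express that three-way vetting, with toy inputs:

#include <stdio.h>
#include <string.h>

/* Per-entry policy: 1 = skip (benign), -1 = abort (invalid), 0 = queue
 * the address for programming. Mirrors the handler's continue/goto split. */
static int vet_mac(const unsigned char *mac, int is_default)
{
    unsigned char zero[6] = { 0 };

    if (is_default)
        return 1;  /* skip: VF can't touch the PF-set default MAC */
    if (!memcmp(mac, zero, 6))
        return -1; /* abort: invalid address fails the whole request */
    return 0;      /* queue it on the filter list */
}

int main(void)
{
    unsigned char mac[6] = { 2, 0, 0, 0, 0, 1 };

    printf("%d\n", vet_mac(mac, 0)); /* 0: would be added to the list */
    return 0;
}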
ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); } /** @@ -1991,35 +2158,38 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) */ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; int req_queues = vfres->num_queue_pairs; - enum ice_status aq_ret = 0; struct ice_pf *pf = vf->pf; + int max_allowed_vf_queues; int tx_rx_queue_left; int cur_queues; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - cur_queues = pf->num_vf_qps; + cur_queues = vf->num_vf_qs; tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + max_allowed_vf_queues = tx_rx_queue_left + cur_queues; if (req_queues <= 0) { dev_err(&pf->pdev->dev, "VF %d tried to request %d queues. Ignoring.\n", vf->vf_id, req_queues); - } else if (req_queues > ICE_MAX_QS_PER_VF) { + } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { dev_err(&pf->pdev->dev, "VF %d tried to request more than %d queues.\n", - vf->vf_id, ICE_MAX_QS_PER_VF); - vfres->num_queue_pairs = ICE_MAX_QS_PER_VF; + vf->vf_id, ICE_MAX_BASE_QS_PER_VF); + vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; } else if (req_queues - cur_queues > tx_rx_queue_left) { dev_warn(&pf->pdev->dev, "VF %d requested %d more queues, but only %d left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); - vfres->num_queue_pairs = tx_rx_queue_left + cur_queues; + vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues, + ICE_MAX_BASE_QS_PER_VF); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; @@ -2033,7 +2203,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) error_param: /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, - aq_ret, (u8 *)vfres, sizeof(*vfres)); + v_ret, (u8 *)vfres, sizeof(*vfres)); } /** @@ -2093,11 +2263,12 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, VLAN_VID_MASK)); if (vlan_id || qos) { - ret = ice_vsi_set_pvid(vsi, vlanprio); + ret = ice_vsi_manage_pvid(vsi, vlanprio, true); if (ret) goto error_set_pvid; } else { - ice_vsi_kill_pvid(vsi); + ice_vsi_manage_pvid(vsi, 0, false); + vsi->info.pvid = 0; } if (vlan_id) { @@ -2129,48 +2300,57 @@ error_set_pvid: */ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; - enum ice_status aq_ret = 0; struct ice_pf *pf = vf->pf; + bool vlan_promisc = false; struct ice_vsi *vsi; + struct ice_hw *hw; + int status = 0; + u8 promisc_m; int i; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (add_v && !ice_is_vf_trusted(vf) && vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { dev_info(&pf->pdev->dev, - "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n"); - aq_ret = ICE_ERR_PARAM; + "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", + vf->vf_id); + /* There is no need to let VF know about being not trusted, + * so we can just return success message here + */ + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto 
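ice_vc_request_qs_msg() grants a request only when it fits; otherwise it offers the VF the best it can get, capped by the per-VF maximum and by what the PF has left. A self-contained model of that clamping; the cap value is illustrative, not ICE_MAX_BASE_QS_PER_VF's real value:

#include <stdio.h>

#define MAX_QS_PER_VF 16 /* illustrative cap */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Clamp a VF queue request: cap at the per-VF maximum, then at what
 * the PF still has available on top of the VF's current allocation. */
static int grant_queues(int req, int cur, int pf_left)
{
    int max_allowed = pf_left + cur;

    if (req <= 0)
        return cur;               /* nonsense request: keep current */
    if (req > MAX_QS_PER_VF)
        return MAX_QS_PER_VF;     /* offer the hard cap instead */
    if (req - cur > pf_left)
        return min_int(max_allowed, MAX_QS_PER_VF); /* best offer */
    return req;                   /* grant; the VF is then reset */
}

int main(void)
{
    printf("%d\n", grant_queues(12, 4, 3)); /* 7: only 3 queues left */
    return 0;
}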
error_param; } for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > ICE_MAX_VLANID) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(&pf->pdev->dev, "invalid VF VLAN id %d\n", vfl->vlan_id[i]); goto error_param; } } - vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id); + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (vsi->info.pvid) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2178,23 +2358,47 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) dev_err(&pf->pdev->dev, "%sable VLAN stripping failed for VSI %i\n", add_v ? "en" : "dis", vsi->vsi_num); - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + vlan_promisc = true; + if (add_v) { for (i = 0; i < vfl->num_elements; i++) { u16 vid = vfl->vlan_id[i]; - if (!ice_vsi_add_vlan(vsi, vid)) { - vf->num_vlan++; + if (ice_vsi_add_vlan(vsi, vid)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } - /* Enable VLAN pruning when VLAN 0 is added */ - if (unlikely(!vid)) - if (ice_cfg_vlan_pruning(vsi, true)) - aq_ret = ICE_ERR_PARAM; + vf->num_vlan++; + /* Enable VLAN pruning when VLAN is added */ + if (!vlan_promisc) { + status = ice_cfg_vlan_pruning(vsi, true, false); + if (status) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(&pf->pdev->dev, + "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", + vid, status); + goto error_param; + } } else { - aq_ret = ICE_ERR_PARAM; + /* Enable Ucast/Mcast VLAN promiscuous mode */ + promisc_m = ICE_PROMISC_VLAN_TX | + ICE_PROMISC_VLAN_RX; + + status = ice_set_vsi_promisc(hw, vsi->idx, + promisc_m, vid); + if (status) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(&pf->pdev->dev, + "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", + vid, status); + } } } } else { @@ -2204,12 +2408,22 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) /* Make sure ice_vsi_kill_vlan is successful before * updating VLAN information */ - if (!ice_vsi_kill_vlan(vsi, vid)) { - vf->num_vlan--; + if (ice_vsi_kill_vlan(vsi, vid)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + vf->num_vlan--; + /* Disable VLAN pruning when removing VLAN */ + ice_cfg_vlan_pruning(vsi, false, false); - /* Disable VLAN pruning when removing VLAN 0 */ - if (unlikely(!vid)) - ice_cfg_vlan_pruning(vsi, false); + /* Disable Unicast/Multicast VLAN promiscuous mode */ + if (vlan_promisc) { + promisc_m = ICE_PROMISC_VLAN_TX | + ICE_PROMISC_VLAN_RX; + + ice_clear_vsi_promisc(hw, vsi->idx, + promisc_m, vid); } } } @@ -2217,10 +2431,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) error_param: /* send the response to the VF */ if (add_v) - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret, NULL, 0); else - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret, NULL, 0); } @@ -2256,22 +2470,22 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) */ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) { - enum ice_status aq_ret = 0; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if 
(!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_vsi_manage_vlan_stripping(vsi, true)) - aq_ret = ICE_ERR_AQ_ERROR; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, - aq_ret, NULL, 0); + v_ret, NULL, 0); } /** @@ -2282,22 +2496,27 @@ error_param: */ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) { - enum ice_status aq_ret = 0; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (ice_vsi_manage_vlan_stripping(vsi, false)) - aq_ret = ICE_ERR_AQ_ERROR; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, - aq_ret, NULL, 0); + v_ret, NULL, 0); } /** @@ -2333,7 +2552,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) /* Perform basic checks on the msg */ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); if (err) { - if (err == VIRTCHNL_ERR_PARAM) + if (err == VIRTCHNL_STATUS_ERR_PARAM) err = -EPERM; else err = -EINVAL; @@ -2355,7 +2574,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) error_handler: if (err) { - ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0); + ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, + NULL, 0); dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", vf_id, v_opcode, msglen, err); return; @@ -2418,7 +2638,8 @@ error_handler: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, vf_id); - err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL, + err = ice_vc_send_msg_to_vf(vf, v_opcode, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0); break; } @@ -2427,7 +2648,7 @@ error_handler: * as it is busy with pending work. */ dev_info(&pf->pdev->dev, - "PF failed to honor VF %d, opcode %d\n, error %d\n", + "PF failed to honor VF %d, opcode %d, error %d\n", vf_id, v_opcode, err); } } @@ -2440,8 +2661,8 @@ error_handler: * * return VF configuration */ -int ice_get_vf_cfg(struct net_device *netdev, int vf_id, - struct ifla_vf_info *ivi) +int +ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2587,7 +2808,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ether_addr_copy(vf->dflt_lan_addr.addr, mac); vf->pf_set_mac = true; netdev_info(netdev, - "mac on VF %d set to %pM\n. VF driver will be reinitialized\n", + "MAC on VF %d set to %pM. 
VF driver will be reinitialized\n", vf_id, mac); ice_vc_dis_vf(vf); @@ -2690,7 +2911,8 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up); /* Notify the VF of its new link state */ - ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 01470a8ee03a..932e2ab3380b 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -70,6 +70,7 @@ struct ice_vf { u8 spoofchk; u16 num_mac; u16 num_vlan; + u16 num_vf_qs; /* num of queue configured per VF */ u8 num_req_qs; /* num of queue pairs requested by VF */ }; @@ -77,8 +78,8 @@ struct ice_vf { void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); -int ice_get_vf_cfg(struct net_device *netdev, int vf_id, - struct ifla_vf_info *ivi); +int +ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); void ice_free_vfs(struct ice_pf *pf); void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); @@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); -int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, - u16 vlan_id, u8 qos, __be16 vlan_proto); - -int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, - int max_tx_rate); +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto); int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); @@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } -static inline int -ice_set_vf_bw(struct net_device __always_unused *netdev, - int __always_unused vf_id, int __always_unused min_tx_rate, - int __always_unused max_tx_rate) -{ - return -EOPNOTSUPP; -} #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index c57671068245..c645d9e648e0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -3158,8 +3158,8 @@ static int igb_set_eee(struct net_device *netdev, } else if (!edata->eee_enabled) { dev_err(&adapter->pdev->dev, "Setting EEE options are not supported with EEE disabled\n"); - return -EINVAL; - } + return -EINVAL; + } adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 69b230c53fed..bea7175d171b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2480,7 +2480,7 @@ static int igb_set_features(struct net_device *netdev, else igb_reset(adapter); - return 0; + return 1; } static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@ -3452,6 +3452,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } } + + dev_pm_set_driver_flags(&pdev->dev, 
DPM_FLAG_NEVER_SKIP); + pm_runtime_put_noidle(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 80faccc34cda..0f5534ce27b0 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -29,9 +29,15 @@ unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); void igc_set_flag_queue_pairs(struct igc_adapter *adapter, const u32 max_rss_queues); int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); bool igc_has_link(struct igc_adapter *adapter); void igc_reset(struct igc_adapter *adapter); int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); +int igc_add_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igc_del_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +void igc_update_stats(struct igc_adapter *adapter); extern char igc_driver_name[]; extern char igc_driver_version[]; @@ -51,6 +57,13 @@ extern char igc_driver_version[]; #define IGC_FLAG_VLAN_PROMISC BIT(15) #define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + #define IGC_START_ITR 648 /* ~6000 ints/sec */ #define IGC_4K_ITR 980 #define IGC_20K_ITR 196 @@ -284,15 +297,50 @@ struct igc_q_vector { struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; }; +#define MAX_ETYPE_FILTER (4 - 1) + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = 0x1, + IGC_FILTER_FLAG_VLAN_TCI = 0x2, + IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +/* RX network flow classification data structure */ +struct igc_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igc_nfc_filter { + struct hlist_node nfc_node; + struct igc_nfc_input filter; + unsigned long cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + struct igc_mac_addr { u8 addr[ETH_ALEN]; u8 queue; u8 state; /* bitmask */ }; -#define IGC_MAC_STATE_DEFAULT 0x1 -#define IGC_MAC_STATE_MODIFIED 0x2 -#define IGC_MAC_STATE_IN_USE 0x4 +#define IGC_MAC_STATE_DEFAULT 0x1 +#define IGC_MAC_STATE_IN_USE 0x2 +#define IGC_MAC_STATE_SRC_ADDR 0x4 +#define IGC_MAC_STATE_QUEUE_STEERING 0x8 + +#define IGC_MAX_RXNFC_FILTERS 16 /* Board specific private data structure */ struct igc_adapter { @@ -356,12 +404,22 @@ struct igc_adapter { u16 tx_ring_count; u16 rx_ring_count; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; u32 *shadow_vfta; u32 rss_queues; + u32 rss_indir_tbl_init; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; /* lock for RX network flow classification filter */ spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; struct igc_mac_addr *mac_table; @@ -447,6 +505,10 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) /* forward declaration */ void igc_reinit_locked(struct igc_adapter *); +int igc_add_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input); +int igc_erase_filter(struct 
igc_adapter *adapter, + struct igc_nfc_filter *input); #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 76d4991d7284..58d1109d7f3f 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018 Intel Corporation */ -#ifndef _IGC_BASE_H -#define _IGC_BASE_H +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ /* forward declaration */ void igc_rx_fifo_flush_base(struct igc_hw *hw); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 7d1bdcd1225a..a9a30268de59 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -310,6 +310,12 @@ IGC_RXDEXT_STATERR_CXE | \ IGC_RXDEXT_STATERR_RXE) +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + /* Header split receive */ #define IGC_RFCTL_IPV6_EX_DIS 0x00010000 #define IGC_RFCTL_LEF 0x00040000 @@ -325,6 +331,10 @@ #define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ #define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + /* GPY211 - I225 defines */ #define GPY_MMD_MASK 0xFFFF0000 #define GPY_MMD_SHIFT 16 @@ -390,4 +400,11 @@ #define IGC_N0_QUEUE -1 +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLAPQF_QUEUE_MASK 0x03 + #endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index eff37a6c0afa..ac98f1d96892 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -2,10 +2,120 @@ /* Copyright (c) 2018 Intel Corporation */ /* ethtool support for igc */ +#include <linux/if_vlan.h> #include <linux/pm_runtime.h> #include "igc.h" +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + 
IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", @@ -545,6 +655,127 @@ static int igc_set_pauseparam(struct net_device *netdev, return retval; } +static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + memcpy(p, igc_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) { + memcpy(p, 
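The IGC_STAT() entries above record a name plus the field's size and offset inside igc_adapter, which is what lets igc_get_ethtool_stats() fill the whole output array with one generic loop. The technique reduced to a standalone program; the struct and its fields are invented for the demo:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t rx_pkts; uint32_t tx_timeouts; };

struct stat_desc { const char *name; size_t size; size_t off; };

#define STAT(n, f) { n, sizeof(((struct stats *)0)->f), \
                     offsetof(struct stats, f) }

/* Same name/size/offset trick as IGC_STAT(): one loop handles both
 * u64 and u32 fields by checking the recorded size. */
static const struct stat_desc descs[] = {
    STAT("rx_packets", rx_pkts),
    STAT("tx_timeout_count", tx_timeouts),
};

int main(void)
{
    struct stats s = { .rx_pkts = 42, .tx_timeouts = 1 };
    char *base = (char *)&s;

    for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
        const struct stat_desc *d = &descs[i];
        uint64_t v = d->size == sizeof(uint64_t) ?
                     *(uint64_t *)(base + d->off) :
                     *(uint32_t *)(base + d->off);
        printf("%s = %llu\n", d->name, (unsigned long long)v);
    }
    return 0;
}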
igc_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_restart", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_drops", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_csum_err", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + static int igc_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -643,6 +874,605 @@ static int igc_set_coalesce(struct net_device *netdev, return 0; } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGC_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + return 0; + } + return -EINVAL; +} + +static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGC_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V4_FLOW: + 
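Each per-ring block in igc_get_ethtool_stats() is a seqcount read section: record the sequence, copy the counters, and retry if a writer was active or the sequence moved. A single-threaded toy of the begin/retry pairing; the real u64_stats primitives add memory barriers, omitted here:

#include <stdio.h>

/* Toy seqcount: a writer increments seq to odd before updating and to
 * even after, so readers retry on odd or changed sequence numbers. */
struct seq_stats { unsigned int seq; unsigned long long pkts, bytes; };

static void read_stats(const struct seq_stats *s,
                       unsigned long long *pkts, unsigned long long *bytes)
{
    unsigned int start;

    do {
        start = s->seq;   /* u64_stats_fetch_begin_irq() analogue */
        *pkts = s->pkts;
        *bytes = s->bytes;
    } while (start & 1 || start != s->seq); /* ..._fetch_retry_irq() */
}

int main(void)
{
    struct seq_stats s = { 2, 10, 1500 };
    unsigned long long p, b;

    read_stats(&s, &p, &b);
    printf("%llu pkts, %llu bytes\n", p, b);
    return 0;
}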
if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V4_FLOW: + /* Fall through */ + case AH_ESP_V4_FLOW: + /* Fall through */ + case AH_V4_FLOW: + /* Fall through */ + case ESP_V4_FLOW: + /* Fall through */ + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V6_FLOW: + /* Fall through */ + case AH_ESP_V6_FLOW: + /* Fall through */ + case AH_V6_FLOW: + /* Fall through */ + case ESP_V6_FLOW: + /* Fall through */ + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igc_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = igc_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack 
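The get/set RSS hash handlers encode one policy: TCP flows always hash on L4 ports, UDP flows only when the opt-in flag is set (hashing fragmented UDP can reorder it, hence the warning below), and the remaining flow types hash on IP addresses alone. That decision table as a tiny function; the flag and bit names stand in for the RXH_* values:

#include <stdio.h>

#define RXH_IP 0x1
#define RXH_L4 0x2

enum flow { FLOW_TCP4, FLOW_UDP4, FLOW_OTHER4 };

/* TCP always gets port hashing; UDP only on explicit opt-in;
 * everything else hashes on the IP pair alone. */
static unsigned int hash_fields(enum flow f, int udp_rss_enabled)
{
    unsigned int data = RXH_IP;

    if (f == FLOW_TCP4 || (f == FLOW_UDP4 && udp_rss_enabled))
        data |= RXH_L4;
    return data;
}

int main(void)
{
    printf("0x%x 0x%x\n", hash_fields(FLOW_UDP4, 0),
           hash_fields(FLOW_UDP4, 1)); /* 0x1 0x3 */
    return 0;
}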
above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(IGC_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= IGC_ETQF_FILTER_ENABLE; + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= (etype & IGC_ETQF_ETYPE_MASK); + + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT) + & IGC_ETQF_QUEUE_MASK); + etqf |= IGC_ETQF_QUEUE_ENABLE; + + wr32(IGC_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(IGC_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) && + queue_index != input->action) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority); + vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(IGC_VLAPQF, vlapqf); + + return 0; +} + +int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + int err = -EINVAL; + + if (hw->mac.type == igc_i225 && + !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i225 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGC_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + err = igc_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igc_clear_etype_filter_regs(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 etqf = rd32(IGC_ETQF(reg_index)); + + etqf &= ~IGC_ETQF_QUEUE_ENABLE; + etqf &= 
~IGC_ETQF_QUEUE_MASK; + etqf &= ~IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter, + u16 vlan_tci) +{ + struct igc_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(IGC_VLAPQF); + vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority, + IGC_VLAPQF_QUEUE_MASK); + + wr32(IGC_VLAPQF, vlapqf); +} + +int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) +{ + if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGC_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + + return 0; +} + +static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter, + struct igc_nfc_filter *input, + u16 sw_idx) +{ + struct igc_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && rule->sw_idx == sw_idx) { + if (!input) + err = igc_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->nfc_node, &parent->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
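
[editor's note] igc_update_ethtool_nfc_entry() below keeps the rule list sorted by sw_idx so lookups can stop early and ETHTOOL_GRXCLSRLALL walks in order; it tracks the last smaller node ("parent") and inserts behind it. The same dance on a plain singly-linked list, as a sketch:

	#include <stddef.h>

	struct rule {
		unsigned int sw_idx;
		struct rule *next;
	};

	static void rule_insert_sorted(struct rule **head, struct rule *input)
	{
		struct rule *parent = NULL, *cur;

		for (cur = *head; cur && cur->sw_idx < input->sw_idx;
		     cur = cur->next)
			parent = cur;	/* last node with a smaller index */

		if (parent) {
			input->next = parent->next;
			parent->next = input;
		} else {
			input->next = *head;
			*head = input;
		}
	}
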
+ */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC || + fsp->ring_cookie >= adapter->num_rx_queues) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGC_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igc_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} + +static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igc_set_rss_hash_opt(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = igc_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igc_del_ethtool_nfc_entry(adapter, cmd); + default: + break; + } + + return ret; +} + void igc_write_rss_indir_tbl(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; @@ -885,17 +1715,13 @@ static int igc_get_link_ksettings(struct net_device *netdev, if (hw->mac.type == igc_i225 && (status & IGC_STATUS_SPEED_2500)) { speed = SPEED_2500; - hw_dbg("2500 Mbs, "); } else { speed = SPEED_1000; - hw_dbg("1000 Mbs, "); } } else if (status & IGC_STATUS_SPEED_100) { speed = SPEED_100; - hw_dbg("100 Mbs, "); } else { speed = SPEED_10; - hw_dbg("10 Mbs, "); } if ((status & IGC_STATUS_FD) || hw->phy.media_type != igc_media_type_copper) @@ -1011,8 
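
[editor's note] The duplicate-rule check below compares whole filter structs with memcmp(), which is only sound because every rule is kzalloc()ed: padding bytes are zeroed, so two logically equal filters also compare equal byte-for-byte. A minimal illustration, with calloc standing in for kzalloc:

	#include <stdlib.h>
	#include <string.h>

	struct filter {
		unsigned short etype;	/* 2 bytes, then padding */
		unsigned int match_flags;
	};

	static struct filter *filter_alloc(void)
	{
		return calloc(1, sizeof(struct filter)); /* zeroes padding too */
	}

	static int filter_equal(const struct filter *a, const struct filter *b)
	{
		return !memcmp(a, b, sizeof(*a));
	}
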
+1837,13 @@ static const struct ethtool_ops igc_ethtool_ops = { .set_ringparam = igc_set_ringparam, .get_pauseparam = igc_get_pauseparam, .set_pauseparam = igc_set_pauseparam, + .get_strings = igc_get_strings, + .get_sset_count = igc_get_sset_count, + .get_ethtool_stats = igc_get_ethtool_stats, .get_coalesce = igc_get_coalesce, .set_coalesce = igc_set_coalesce, + .get_rxnfc = igc_get_rxnfc, + .set_rxnfc = igc_set_rxnfc, .get_rxfh_indir_size = igc_get_rxfh_indir_size, .get_rxfh = igc_get_rxfh, .set_rxfh = igc_set_rxfh, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 87a11879bf2d..a883b3f357e7 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -620,6 +620,55 @@ static void igc_configure_tx(struct igc_adapter *adapter) */ static void igc_setup_mrqc(struct igc_adapter *adapter) { + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); } /** @@ -1738,12 +1787,200 @@ void igc_up(struct igc_adapter *adapter) * igc_update_stats - Update the board statistics counters * @adapter: board private structure */ -static void igc_update_stats(struct igc_adapter *adapter) +void igc_update_stats(struct igc_adapter *adapter) { + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
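
[editor's note] igc_setup_mrqc() below spreads the RSS indirection-table slots evenly over the active queues with plain integer math. A small demo of the mapping, assuming the 128-entry table size used by igb-class parts (the exact IGC_RETA_SIZE value is an assumption here) and 4 RSS queues:

	#include <stdio.h>

	#define RETA_SIZE 128	/* assumed table size */

	int main(void)
	{
		unsigned int num_rx_queues = 4, j;

		for (j = 0; j < RETA_SIZE; j++)
			printf("slot %3u -> queue %u\n", j,
			       (j * num_rx_queues) / RETA_SIZE);
		return 0;
	}

With 4 queues, slots 0..31 land on queue 0, 32..63 on queue 1, and so on.
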
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.symerrs += rd32(IGC_SYMERRS); + adapter->stats.sec += rd32(IGC_SEC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + adapter->stats.tsctfc += rd32(IGC_TSCTFC); + + adapter->stats.iac += rd32(IGC_IAC); + adapter->stats.icrxoc += rd32(IGC_ICRXOC); + adapter->stats.icrxptc += rd32(IGC_ICRXPTC); + 
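
[editor's note] Most of the MAC counters harvested below are read-to-clear, so each 32-bit hardware read is folded into a 64-bit software accumulator; for the octet counters split across an L/H register pair, the high half is read purely to clear the pair. A hypothetical sketch of the accumulation pattern (reg_read() stands in for the memory-mapped rd32() accessor):

	#include <stdint.h>

	extern uint32_t reg_read(unsigned int offset);	/* rd32() stand-in */

	struct sw_counter {
		uint64_t total;
	};

	/* fold one read-to-clear 32-bit counter into its 64-bit accumulator */
	static void accumulate(struct sw_counter *c, unsigned int reg)
	{
		c->total += reg_read(reg);	/* device zeroes the register */
	}
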
adapter->stats.icrxatc += rd32(IGC_ICRXATC); + adapter->stats.ictxptc += rd32(IGC_ICTXPTC); + adapter->stats.ictxatc += rd32(IGC_ICTXATC); + adapter->stats.ictxqec += rd32(IGC_ICTXQEC); + adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); } static void igc_nfc_filter_exit(struct igc_adapter *adapter) { + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_erase_filter(adapter, rule); + + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igc_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igc_nfc_filter_restore(struct igc_adapter *adapter) +{ + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); } /** @@ -1890,6 +2127,86 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev) return &netdev->stats; } +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
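
[editor's note] As the comment below notes, RLEC is not trusted on newer hardware, so the length-error figure is rebuilt from the undersize (RUC) and oversize (ROC) counters. The same roll-up in isolation:

	#include <stdint.h>

	struct hw_stats {
		uint64_t rxerrc, crcerrs, algnerrc, ruc, roc, cexterr;
	};

	static uint64_t rx_errors(const struct hw_stats *s)
	{
		return s->rxerrc + s->crcerrs + s->algnerrc +
		       s->ruc + s->roc + s->cexterr;
	}

	static uint64_t rx_length_errors(const struct hw_stats *s)
	{
		return s->ruc + s->roc;	/* undersize + oversize */
	}
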
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igc_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + /** * igc_configure - configure the hardware for RX and TX * @adapter: private board structure @@ -1906,6 +2223,7 @@ static void igc_configure(struct igc_adapter *adapter) igc_setup_mrqc(adapter); igc_setup_rctl(adapter); + igc_nfc_filter_restore(adapter); igc_configure_tx(adapter); igc_configure_rx(adapter); @@ -1967,6 +2285,127 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter) igc_rar_set_index(adapter, 0); } +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGC_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGC_MAC_STATE_SRC_ADDR) != + (flags & IGC_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used. + */ +static int igc_add_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. 
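
[editor's note] igc_features_check() above drops checksum/TSO offloads whenever the header offsets exceed what a single context descriptor can describe. A sketch of the two lengths it derives; offsets count from the start of the frame, and the MAX limits here are placeholders rather than the driver's IGC_MAX_* constants:

	#include <stddef.h>

	#define MAX_MAC_HDR_LEN		127	/* placeholder limit */
	#define MAX_NETWORK_HDR_LEN	511	/* placeholder limit */

	struct pkt_offsets {
		size_t data;		/* start of frame */
		size_t network_header;	/* start of L3 header */
		size_t checksum_start;	/* where L4 checksumming begins */
	};

	static int offloads_ok(const struct pkt_offsets *p)
	{
		size_t mac_hdr_len = p->network_header - p->data;
		size_t network_hdr_len = p->checksum_start - p->network_header;

		return mac_hdr_len <= MAX_MAC_HDR_LEN &&
		       network_hdr_len <= MAX_NETWORK_HDR_LEN;
	}
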
+ * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igc_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags; + + igc_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +int igc_add_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igc_add_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGC_MAC_STATE_SRC_ADDR can be used. + */ +static int igc_del_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } + + igc_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +int igc_del_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igc_del_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); +} + /** * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -3434,6 +3873,9 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, .ndo_get_stats = igc_get_stats, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, }; /* PCIe configuration access */ @@ -3663,6 +4105,9 @@ static int igc_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 5afe7a8d3faf..50d7c04dccf5 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -80,8 +80,23 @@ /* MSI-X Table Register Descriptions */ #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple 
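
[editor's note] When igc_del_mac_filter_flags() below removes the entry covering the permanent MAC address, it must not leave the port unreachable, so the entry is returned to its default state instead of being wiped. Simplified state handling, with hypothetical STATE_* flags mirroring the IGC_MAC_STATE_* usage:

	#include <string.h>

	#define STATE_IN_USE	(1u << 0)
	#define STATE_DEFAULT	(1u << 1)

	struct mac_entry {
		unsigned char addr[6];
		unsigned int state;
		unsigned int queue;
	};

	static void mac_entry_release(struct mac_entry *e)
	{
		if (e->state & STATE_DEFAULT) {
			/* keep the permanent address programmed */
			e->state = STATE_DEFAULT | STATE_IN_USE;
		} else {
			e->state = 0;
			e->queue = 0;
			memset(e->addr, 0, sizeof(e->addr));
		}
	}
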
Receive Control - RW */
+
+/* Filtering Registers */
+#define IGC_ETQF(_n)	(0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+/* ETQF register bit definitions */
+#define IGC_ETQF_FILTER_ENABLE	BIT(26)
+#define IGC_ETQF_QUEUE_ENABLE	BIT(31)
+#define IGC_ETQF_QUEUE_SHIFT	16
+#define IGC_ETQF_QUEUE_MASK	0x00070000
+#define IGC_ETQF_ETYPE_MASK	0x0000FFFF
+
 /* Redirection Table - RW Array */
 #define IGC_RETA(_i)	(0x05C00 + ((_i) * 4))
+/* RSS Random Key - RW Array */
+#define IGC_RSSRK(_i)	(0x05C80 + ((_i) * 4))
 
 /* Receive Register Descriptions */
 #define IGC_RCTL	0x00100 /* Rx Control - RW */
@@ -101,6 +116,7 @@
 #define IGC_UTA	0x0A000 /* Unicast Table Array - RW */
 #define IGC_RAL(_n)	(0x05400 + ((_n) * 0x08))
 #define IGC_RAH(_n)	(0x05404 + ((_n) * 0x08))
+#define IGC_VLAPQF	0x055B0 /* VLAN Priority Queue Filter VLAPQF */
 
 /* Transmit Register Descriptions */
 #define IGC_TCTL	0x00400 /* Tx Control - RW */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e100054a3765..16c728984164 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8483,8 +8483,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 #ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      struct net_device *sb_dev,
-			      select_queue_fallback_t fallback)
+			      struct net_device *sb_dev)
 {
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_ring_feature *f;
@@ -8514,7 +8513,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		break;
 		/* fall through */
 	default:
-		return fallback(dev, skb, sb_dev);
+		return netdev_pick_tx(dev, skb, sb_dev);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];
@@ -9796,7 +9795,7 @@ static int ixgbe_set_features(struct net_device *netdev,
 				    NETIF_F_HW_VLAN_CTAG_FILTER))
 		ixgbe_set_rx_mode(netdev);
 
-	return 0;
+	return 1;
 }
 
 /**
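
[editor's note] The VLAPQF register defined above packs one nibble per VLAN priority: a queue-select field in the low bits of the nibble plus a valid bit at bit 3, which is how the IGC_VLAPQF_P_VALID/IGC_VLAPQF_QUEUE_SEL accessors in the ethtool code address it. A sketch of programming one priority; the 2-bit queue width follows the igb-era layout and is an assumption here:

	#include <stdint.h>

	#define VLAPQF_QUEUE_MASK	0x3u
	#define VLAPQF_P_VALID(prio)	(1u << (3 + (prio) * 4))
	#define VLAPQF_QUEUE_SEL(prio, q) \
		(((q) & VLAPQF_QUEUE_MASK) << ((prio) * 4))

	static uint32_t vlapqf_set(uint32_t vlapqf, unsigned int prio,
				   unsigned int queue)
	{
		/* clear the old queue select, then set valid + new queue */
		vlapqf &= ~VLAPQF_QUEUE_SEL(prio, VLAPQF_QUEUE_MASK);
		vlapqf |= VLAPQF_P_VALID(prio) | VLAPQF_QUEUE_SEL(prio, queue);
		return vlapqf;
	}
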