Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile          |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h             |  44
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h  |  33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c      | 220
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h      |   9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c     |  19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h  |   3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c         | 334
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc_int.h     |  14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c         |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c         |  11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h         |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c        | 142
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c       |  69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c      |  28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h      |   4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h        |   4
17 files changed, 905 insertions, 34 deletions
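
The per-file diff below adds an auxiliary device (pf->adev) to the PF, plugs it in via ice_plug_aux_dev(), and forwards events to whatever driver has bound to that device through struct iidc_auxiliary_drv. For orientation on the consumer side, here is a minimal sketch of an auxiliary driver that could bind to the device this patch registers. The "ice.roce" match string (assumed to correspond to IIDC_RDMA_ROCE_NAME), the probe/remove bodies, and all example_* names are illustrative assumptions and are not part of this patch.

// Sketch of a peer auxiliary driver binding to the device registered by
// ice_plug_aux_dev(). Struct layouts follow what is visible in this diff
// (iidc_auxiliary_dev wraps the auxiliary_device, iidc_auxiliary_drv wraps
// the auxiliary_driver and adds event_handler); match string and bodies
// are assumptions for illustration.
#include <linux/module.h>
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc.h>

static void example_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	/* events are delivered as a bitmap in event->type */
	if (test_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type))
		pr_info("ice reports an impending TC change\n");
}

static int example_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct iidc_auxiliary_dev *iadev =
		container_of(adev, struct iidc_auxiliary_dev, adev);

	/* iadev->pf is the handle passed back into the exported ice IIDC
	 * calls, e.g. ice_add_rdma_qset() or ice_get_qos_params().
	 */
	dev_info(&adev->dev, "bound to ice PF %p\n", iadev->pf);
	return 0;
}

static void example_remove(struct auxiliary_device *adev)
{
	dev_info(&adev->dev, "unbinding from ice PF\n");
}

static const struct auxiliary_device_id example_id_table[] = {
	{ .name = "ice.roce" },	/* assumed "<modname>.<IIDC_RDMA_ROCE_NAME>" */
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, example_id_table);

static struct iidc_auxiliary_drv example_adrv = {
	.adrv = {
		.probe = example_probe,
		.remove = example_remove,
		.id_table = example_id_table,
	},
	.event_handler = example_event_handler,
};

static int __init example_init(void)
{
	return auxiliary_driver_register(&example_adrv.adrv);
}
module_init(example_init);

static void __exit example_exit(void)
{
	auxiliary_driver_unregister(&example_adrv.adrv);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");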
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 07fe857e9e3a..dfb64fb504a2 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -22,6 +22,7 @@ ice-y := ice_main.o \ ice_ethtool_fdir.o \ ice_flex_pipe.o \ ice_flow.o \ + ice_idc.o \ ice_devlink.o \ ice_fw_update.o \ ice_lag.o \ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 2924c67567b8..ea1ab7ca82c4 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -34,6 +34,7 @@ #include <linux/if_bridge.h> #include <linux/ctype.h> #include <linux/bpf.h> +#include <linux/auxiliary_bus.h> #include <linux/avf/virtchnl.h> #include <linux/cpu_rmap.h> #include <linux/dim.h> @@ -55,6 +56,7 @@ #include "ice_switch.h" #include "ice_common.h" #include "ice_sched.h" +#include "ice_idc_int.h" #include "ice_virtchnl_pf.h" #include "ice_sriov.h" #include "ice_fdir.h" @@ -78,6 +80,8 @@ #define ICE_MIN_LAN_OICR_MSIX 1 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) #define ICE_FDIR_MSIX 2 +#define ICE_RDMA_NUM_AEQ_MSIX 4 +#define ICE_MIN_RDMA_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -88,8 +92,9 @@ #define ICE_MAX_LG_RSS_QS 256 #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) +#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1) /* All VF control VSIs share the same IRQ, so assign a unique ID for them */ -#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1) +#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 @@ -203,9 +208,9 @@ enum ice_pf_state { ICE_NEEDS_RESTART, ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ - ICE_PFR_REQ, /* set by driver and peers */ - ICE_CORER_REQ, /* set by driver and peers */ - ICE_GLOBR_REQ, /* set by driver and peers */ + ICE_PFR_REQ, /* set by driver */ + ICE_CORER_REQ, /* set by driver */ + ICE_GLOBR_REQ, /* set by driver */ ICE_CORER_RECV, /* set by OICR handler */ ICE_GLOBR_RECV, /* set by OICR handler */ ICE_EMPR_RECV, /* set by OICR handler */ @@ -332,6 +337,7 @@ struct ice_vsi { u16 req_rxq; /* User requested Rx queues */ u16 num_rx_desc; u16 num_tx_desc; + u16 qset_handle[ICE_MAX_TRAFFIC_CLASS]; struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; struct ice_ring **xdp_rings; /* XDP ring array */ @@ -374,12 +380,14 @@ struct ice_q_vector { enum ice_pf_flags { ICE_FLAG_FLTR_SYNC, + ICE_FLAG_RDMA_ENA, ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, ICE_FLAG_SRIOV_CAPABLE, ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, ICE_FLAG_FD_ENA, + ICE_FLAG_AUX_ENA, ICE_FLAG_ADV_FEATURES, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, @@ -440,6 +448,8 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ struct mutex tc_mutex; /* lock to protect TC changes */ u32 msg_enable; + u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ + u16 rdma_base_vector; /* spinlock to protect the AdminQ wait list */ spinlock_t aq_wait_lock; @@ -472,6 +482,8 @@ struct ice_pf { unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; + struct auxiliary_device *adev; + int aux_idx; u32 sw_int_count; __le64 nvm_phy_type_lo; /* NVM PHY type low */ @@ -638,6 +650,9 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); void 
ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); +int ice_plug_aux_dev(struct ice_pf *pf); +void ice_unplug_aux_dev(struct ice_pf *pf); +int ice_init_rdma(struct ice_pf *pf); const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); bool ice_is_wol_supported(struct ice_hw *hw); @@ -662,4 +677,25 @@ int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); +/** + * ice_set_rdma_cap - enable RDMA support + * @pf: PF struct + */ +static inline void ice_set_rdma_cap(struct ice_pf *pf) +{ + if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { + set_bit(ICE_FLAG_RDMA_ENA, pf->flags); + ice_plug_aux_dev(pf); + } +} + +/** + * ice_clear_rdma_cap - disable RDMA support + * @pf: PF struct + */ +static inline void ice_clear_rdma_cap(struct ice_pf *pf) +{ + ice_unplug_aux_dev(pf); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); +} #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 5cdfe406af84..ff11a618bef7 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -115,6 +115,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_PENDING_NET_VER 0x004D +#define ICE_AQC_CAPS_RDMA 0x0051 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; @@ -1684,6 +1685,36 @@ struct ice_aqc_dis_txq_item { __le16 q_id[]; } __packed; +/* Add Tx RDMA Queue Set (indirect 0x0C33) */ +struct ice_aqc_add_rdma_qset { + u8 num_qset_grps; + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is the descriptor of each Qset entry for the Add Tx RDMA Queue Set + * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset. + */ +struct ice_aqc_add_tx_rdma_qset_entry { + __le16 tx_qset_id; + u8 rsvd[2]; + __le32 qset_teid; + struct ice_aqc_txsched_elem info; +}; + +/* The format of the command buffer for Add Tx RDMA Queue Set(0x0C33) + * is an array of the following structs. Please note that the length of + * each struct ice_aqc_add_rdma_qset is variable due to the variable + * number of queues in each group! + */ +struct ice_aqc_add_rdma_qset_data { + __le32 parent_teid; + __le16 num_qsets; + u8 rsvd[2]; + struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[]; +}; + /* Configure Firmware Logging Command (indirect 0xFF09) * Logging Information Read Response (indirect 0xFF10) * Note: The 0xFF10 command has no input parameters. 
@@ -1880,6 +1911,7 @@ struct ice_aq_desc { struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; + struct ice_aqc_add_rdma_qset add_rdma_qset; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_fw_logging fw_logging; @@ -2028,6 +2060,7 @@ enum ice_adminq_opc { /* Tx queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + ice_aqc_opc_add_rdma_qset = 0x0C33, /* package commands */ ice_aqc_opc_download_pkg = 0x0C40, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e93b1e40f627..b8cc737ea261 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2,6 +2,7 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice_common.h" +#include "ice_lib.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" @@ -1062,7 +1063,8 @@ enum ice_status ice_check_reset(struct ice_hw *hw) GLNVM_ULD_POR_DONE_1_M |\ GLNVM_ULD_PCIER_DONE_2_M) - uld_mask = ICE_RESET_DONE_MASK; + uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ? + GLNVM_ULD_PE_DONE_M : 0); /* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { @@ -1938,6 +1940,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, caps->nvm_unified_update); break; + case ICE_AQC_CAPS_RDMA: + caps->rdma = (number == 1); + ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); + break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", @@ -1971,6 +1977,16 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) caps->maxtc = 4; ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", caps->maxtc); + if (caps->rdma) { + ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); + caps->rdma = 0; + } + + /* print message only when processing device capabilities + * during initialization. 
+ */ + if (caps == &hw->dev_caps.common_cap) + dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); } } @@ -3635,6 +3651,52 @@ do_aq: return status; } +/** + * ice_aq_add_rdma_qsets + * @hw: pointer to the hardware structure + * @num_qset_grps: Number of RDMA Qset groups + * @qset_list: list of Qset groups to be added + * @buf_size: size of buffer for indirect command + * @cd: pointer to command details structure or NULL + * + * Add Tx RDMA Qsets (0x0C33) + */ +static int +ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, + struct ice_aqc_add_rdma_qset_data *qset_list, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_rdma_qset_data *list; + struct ice_aqc_add_rdma_qset *cmd; + struct ice_aq_desc desc; + u16 i, sum_size = 0; + + cmd = &desc.params.add_rdma_qset; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); + + if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) + return -EINVAL; + + for (i = 0, list = qset_list; i < num_qset_grps; i++) { + u16 num_qsets = le16_to_cpu(list->num_qsets); + + sum_size += struct_size(list, rdma_qsets, num_qsets); + list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + + num_qsets); + } + + if (buf_size != sum_size) + return -EINVAL; + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->num_qset_grps = num_qset_grps; + + return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list, + buf_size, cd)); +} + /* End of FW Admin Queue command wrappers */ /** @@ -4133,6 +4195,162 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, } /** + * ice_cfg_vsi_rdma - configure the VSI RDMA queues + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc_bitmap: TC bitmap + * @max_rdmaqs: max RDMA queues array per TC + * + * This function adds/updates the VSI RDMA queues per TC. 
+ */ +int +ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, + u16 *max_rdmaqs) +{ + return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, + max_rdmaqs, + ICE_SCHED_NODE_OWNER_RDMA)); +} + +/** + * ice_ena_vsi_rdma_qset + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number + * @rdma_qset: pointer to RDMA Qset + * @num_qsets: number of RDMA Qsets + * @qset_teid: pointer to Qset node TEIDs + * + * This function adds RDMA Qset + */ +int +ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) +{ + struct ice_aqc_txsched_elem_data node = { 0 }; + struct ice_aqc_add_rdma_qset_data *buf; + struct ice_sched_node *parent; + enum ice_status status; + struct ice_hw *hw; + u16 i, buf_size; + int ret; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return -EIO; + hw = pi->hw; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return -EINVAL; + + buf_size = struct_size(buf, rdma_qsets, num_qsets); + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + mutex_lock(&pi->sched_lock); + + parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, + ICE_SCHED_NODE_OWNER_RDMA); + if (!parent) { + ret = -EINVAL; + goto rdma_error_exit; + } + buf->parent_teid = parent->info.node_teid; + node.parent_teid = parent->info.node_teid; + + buf->num_qsets = cpu_to_le16(num_qsets); + for (i = 0; i < num_qsets; i++) { + buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); + buf->rdma_qsets[i].info.valid_sections = + ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | + ICE_AQC_ELEM_VALID_EIR; + buf->rdma_qsets[i].info.generic = 0; + buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->rdma_qsets[i].info.cir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); + buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->rdma_qsets[i].info.eir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); + } + ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); + if (ret) { + ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); + goto rdma_error_exit; + } + node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; + for (i = 0; i < num_qsets; i++) { + node.node_teid = buf->rdma_qsets[i].qset_teid; + status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, + &node); + if (status) { + ret = ice_status_to_errno(status); + break; + } + qset_teid[i] = le32_to_cpu(node.node_teid); + } +rdma_error_exit: + mutex_unlock(&pi->sched_lock); + kfree(buf); + return ret; +} + +/** + * ice_dis_vsi_rdma_qset - free RDMA resources + * @pi: port_info struct + * @count: number of RDMA Qsets to free + * @qset_teid: TEID of Qset node + * @q_id: list of queue IDs being disabled + */ +int +ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, + u16 *q_id) +{ + struct ice_aqc_dis_txq_item *qg_list; + enum ice_status status = 0; + struct ice_hw *hw; + u16 qg_size; + int i; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return -EIO; + + hw = pi->hw; + + qg_size = struct_size(qg_list, q_id, 1); + qg_list = kzalloc(qg_size, GFP_KERNEL); + if (!qg_list) + return -ENOMEM; + + mutex_lock(&pi->sched_lock); + + for (i = 0; i < count; i++) { + struct ice_sched_node *node; + + node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); + if (!node) + continue; + + qg_list->parent_teid = node->info.parent_teid; + qg_list->num_qs = 1; + qg_list->q_id[0] = + cpu_to_le16(q_id[i] | + 
ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); + + status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, + ICE_NO_RESET, 0, NULL); + if (status) + break; + + ice_free_sched_node(pi, node); + } + + mutex_unlock(&pi->sched_lock); + kfree(qg_list); + return ice_status_to_errno(status); +} + +/** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct * diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 7a9d2dfb21a2..0fdda597fbc8 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -147,6 +147,15 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); +int +ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, + u16 *max_rdmaqs); +int +ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 *rdma_qset, u16 num_qsets, u32 *qset_teid); +int +ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, + u16 *q_id); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index df02cffdf209..857dc62da7a8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -275,6 +275,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct device *dev = ice_pf_to_dev(pf); int ret = ICE_DCB_NO_HW_CHG; + struct iidc_event *event; struct ice_vsi *pf_vsi; curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; @@ -313,6 +314,15 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) goto free_cfg; } + /* Notify AUX drivers about impending change to TCs */ + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return -ENOMEM; + + set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + /* avoid race conditions by holding the lock while disabling and * re-enabling the VSI */ @@ -640,6 +650,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) void ice_pf_dcb_recfg(struct ice_pf *pf) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + struct iidc_event *event; u8 tc_map = 0; int v, ret; @@ -675,6 +686,14 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) if (vsi->type == ICE_VSI_PF) ice_dcbnl_set_all(vsi); } + /* Notify the AUX drivers that TC change is finished */ + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return; + + set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 9b8300d4a267..5386285c39e7 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -111,8 +111,6 @@ #define VPGEN_VFRSTAT_VFRD_M BIT(0) #define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) #define VPGEN_VFRTRIG_VFSWR_M BIT(0) -#define PFHMC_ERRORDATA 0x00520500 -#define PFHMC_ERRORINFO 0x00520400 #define GLINT_CTL 0x0016CC54 #define GLINT_CTL_DIS_AUTOMASK_M BIT(0) #define GLINT_CTL_ITR_GRAN_200_S 16 @@ -161,6 +159,7 @@ #define PFINT_OICR_GRST_M BIT(20) #define PFINT_OICR_PCI_EXCEPTION_M BIT(21) #define 
PFINT_OICR_HMC_ERR_M BIT(26) +#define PFINT_OICR_PE_PUSH_M BIT(27) #define PFINT_OICR_PE_CRITERR_M BIT(28) #define PFINT_OICR_VFLR_M BIT(29) #define PFINT_OICR_SWINT_M BIT(31) diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c new file mode 100644 index 000000000000..1f2afdf6cd48 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021, Intel Corporation. */ + +/* Inter-Driver Communication */ +#include "ice.h" +#include "ice_lib.h" +#include "ice_dcb_lib.h" + +/** + * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct + * @pf: pointer to PF struct + * + * This function has to be called with a device_lock on the + * pf->adev.dev to avoid race conditions. + */ +static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf) +{ + struct auxiliary_device *adev; + + adev = pf->adev; + if (!adev || !adev->dev.driver) + return NULL; + + return container_of(adev->dev.driver, struct iidc_auxiliary_drv, + adrv.driver); +} + +/** + * ice_send_event_to_aux - send event to RDMA AUX driver + * @pf: pointer to PF struct + * @event: event struct + */ +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) +{ + struct iidc_auxiliary_drv *iadrv; + + if (!pf->adev) + return; + + device_lock(&pf->adev->dev); + iadrv = ice_get_auxiliary_drv(pf); + if (iadrv && iadrv->event_handler) + iadrv->event_handler(pf, event); + device_unlock(&pf->adev->dev); +} + +/** + * ice_find_vsi - Find the VSI from VSI ID + * @pf: The PF pointer to search in + * @vsi_num: The VSI ID to search for + */ +static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) +{ + int i; + + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) + return pf->vsi[i]; + return NULL; +} + +/** + * ice_add_rdma_qset - Add Leaf Node for RDMA Qset + * @pf: PF struct + * @qset: Resource to be allocated + */ +int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +{ + u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS]; + struct ice_vsi *vsi; + struct device *dev; + u32 qset_teid; + u16 qs_handle; + int status; + int i; + + if (WARN_ON(!pf || !qset)) + return -EINVAL; + + dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EINVAL; + + vsi = ice_get_main_vsi(pf); + if (!vsi) { + dev_err(dev, "RDMA QSet invalid VSI\n"); + return -EINVAL; + } + + ice_for_each_traffic_class(i) + max_rdmaqs[i] = 0; + + max_rdmaqs[qset->tc]++; + qs_handle = qset->qs_handle; + + status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_rdmaqs); + if (status) { + dev_err(dev, "Failed VSI RDMA Qset config\n"); + return status; + } + + status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc, + &qs_handle, 1, &qset_teid); + if (status) { + dev_err(dev, "Failed VSI RDMA Qset enable\n"); + return status; + } + vsi->qset_handle[qset->tc] = qset->qs_handle; + qset->teid = qset_teid; + + return 0; +} +EXPORT_SYMBOL_GPL(ice_add_rdma_qset); + +/** + * ice_del_rdma_qset - Delete leaf node for RDMA Qset + * @pf: PF struct + * @qset: Resource to be freed + */ +int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +{ + struct ice_vsi *vsi; + u32 teid; + u16 q_id; + + if (WARN_ON(!pf || !qset)) + return -EINVAL; + + vsi = ice_find_vsi(pf, qset->vport_id); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n"); + return -EINVAL; + } + + q_id = qset->qs_handle; + teid = qset->teid; + + 
vsi->qset_handle[qset->tc] = 0; + + return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id); +} +EXPORT_SYMBOL_GPL(ice_del_rdma_qset); + +/** + * ice_rdma_request_reset - accept request from RDMA to perform a reset + * @pf: struct for PF + * @reset_type: type of reset + */ +int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type) +{ + enum ice_reset_req reset; + + if (WARN_ON(!pf)) + return -EINVAL; + + switch (reset_type) { + case IIDC_PFR: + reset = ICE_RESET_PFR; + break; + case IIDC_CORER: + reset = ICE_RESET_CORER; + break; + case IIDC_GLOBR: + reset = ICE_RESET_GLOBR; + break; + default: + dev_err(ice_pf_to_dev(pf), "incorrect reset request\n"); + return -EINVAL; + } + + return ice_schedule_reset(pf, reset); +} +EXPORT_SYMBOL_GPL(ice_rdma_request_reset); + +/** + * ice_rdma_update_vsi_filter - update main VSI filters for RDMA + * @pf: pointer to struct for PF + * @vsi_id: VSI HW idx to update filter on + * @enable: bool whether to enable or disable filters + */ +int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) +{ + struct ice_vsi *vsi; + int status; + + if (WARN_ON(!pf)) + return -EINVAL; + + vsi = ice_find_vsi(pf, vsi_id); + if (!vsi) + return -EINVAL; + + status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable); + if (status) { + dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n", + enable ? "en" : "dis"); + } else { + if (enable) + vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + else + vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + } + + return status; +} +EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter); + +/** + * ice_get_qos_params - parse QoS params for RDMA consumption + * @pf: pointer to PF struct + * @qos: set of QoS values + */ +void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) +{ + struct ice_dcbx_cfg *dcbx_cfg; + unsigned int i; + u32 up2tc; + + dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); + + qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg); + for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) + qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; +} +EXPORT_SYMBOL_GPL(ice_get_qos_params); + +/** + * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver + * @pf: board private structure to initialize + */ +static int ice_reserve_rdma_qvector(struct ice_pf *pf) +{ + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + int index; + + index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix, + ICE_RES_RDMA_VEC_ID); + if (index < 0) + return index; + pf->num_avail_sw_msix -= pf->num_rdma_msix; + pf->rdma_base_vector = (u16)index; + } + return 0; +} + +/** + * ice_adev_release - function to be mapped to AUX dev's release op + * @dev: pointer to device to free + */ +static void ice_adev_release(struct device *dev) +{ + struct iidc_auxiliary_dev *iadev; + + iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev); + kfree(iadev); +} + +/** + * ice_plug_aux_dev - allocate and register AUX device + * @pf: pointer to pf struct + */ +int ice_plug_aux_dev(struct ice_pf *pf) +{ + struct iidc_auxiliary_dev *iadev; + struct auxiliary_device *adev; + int ret; + + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); + if (!iadev) + return -ENOMEM; + + adev = &iadev->adev; + pf->adev = adev; + iadev->pf = pf; + + adev->id = pf->aux_idx; + adev->dev.release = ice_adev_release; + adev->dev.parent = &pf->pdev->dev; + adev->name = IIDC_RDMA_ROCE_NAME; + + 
ret = auxiliary_device_init(adev); + if (ret) { + pf->adev = NULL; + kfree(iadev); + return ret; + } + + ret = auxiliary_device_add(adev); + if (ret) { + pf->adev = NULL; + auxiliary_device_uninit(adev); + return ret; + } + + return 0; +} + +/* ice_unplug_aux_dev - unregister and free AUX device + * @pf: pointer to pf struct + */ +void ice_unplug_aux_dev(struct ice_pf *pf) +{ + if (!pf->adev) + return; + + auxiliary_device_delete(pf->adev); + auxiliary_device_uninit(pf->adev); + pf->adev = NULL; +} + +/** + * ice_init_rdma - initializes PF for RDMA use + * @pf: ptr to ice_pf + */ +int ice_init_rdma(struct ice_pf *pf) +{ + struct device *dev = &pf->pdev->dev; + int ret; + + /* Reserve vector resources */ + ret = ice_reserve_rdma_qvector(pf); + if (ret < 0) { + dev_err(dev, "failed to reserve vectors for RDMA\n"); + return ret; + } + + return ice_plug_aux_dev(pf); +} diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h new file mode 100644 index 000000000000..b7796b8aecbd --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. */ + +#ifndef _ICE_IDC_INT_H_ +#define _ICE_IDC_INT_H_ + +#include <linux/net/intel/iidc.h> +#include "ice.h" + +struct ice_pf; + +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event); + +#endif /* !_ICE_IDC_INT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 4599fc3b4ed8..37c18c66b5c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -172,6 +172,7 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info) } ice_clear_sriov_cap(pf); + ice_clear_rdma_cap(pf); lag->bonded = true; lag->role = ICE_LAG_UNSET; @@ -222,6 +223,7 @@ ice_lag_unlink(struct ice_lag *lag, } ice_set_sriov_cap(pf); + ice_set_rdma_cap(pf); lag->bonded = false; lag->role = ICE_LAG_NONE; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 27f9dac8719c..6d360aeae596 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -629,6 +629,17 @@ bool ice_is_safe_mode(struct ice_pf *pf) } /** + * ice_is_aux_ena + * @pf: pointer to the PF struct + * + * returns true if AUX devices/drivers are supported, false otherwise + */ +bool ice_is_aux_ena(struct ice_pf *pf) +{ + return test_bit(ICE_FLAG_AUX_ENA, pf->flags); +} + +/** * ice_vsi_clean_rss_flow_fld - Delete RSS configuration * @vsi: the VSI being cleaned up * diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 511c2316c40c..5ec857f71459 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -102,7 +102,7 @@ enum ice_status ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); bool ice_is_safe_mode(struct ice_pf *pf); - +bool ice_is_aux_ena(struct ice_pf *pf); bool ice_is_dflt_vsi_in_use(struct ice_sw *sw); bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0eb2307325d3..2d2a28c0a7ac 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -35,6 +35,8 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX MODULE_PARM_DESC(debug, "netif 
level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ +static DEFINE_IDA(ice_aux_ida); + static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; @@ -454,6 +456,8 @@ ice_prepare_for_reset(struct ice_pf *pf) if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; + ice_unplug_aux_dev(pf); + /* Notify VFs of impending reset */ if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); @@ -2118,6 +2122,8 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) return -EBUSY; } + ice_unplug_aux_dev(pf); + switch (reset) { case ICE_RESET_PFR: set_bit(ICE_PFR_REQ, pf->state); @@ -2622,6 +2628,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf) PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_VFLR_M | PFINT_OICR_HMC_ERR_M | + PFINT_OICR_PE_PUSH_M | PFINT_OICR_PE_CRITERR_M); wr32(hw, PFINT_OICR_ENA, val); @@ -2692,8 +2699,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* If a reset cycle isn't already in progress, we set a bit in * pf->state so that the service task can start a reset/rebuild. - * We also make note of which reset happened so that peer - * devices/drivers can be informed. */ if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { if (reset == ICE_RESET_CORER) @@ -2720,11 +2725,19 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } } - if (oicr & PFINT_OICR_HMC_ERR_M) { - ena_mask &= ~PFINT_OICR_HMC_ERR_M; - dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n", - rd32(hw, PFHMC_ERRORINFO), - rd32(hw, PFHMC_ERRORDATA)); +#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) + if (oicr & ICE_AUX_CRIT_ERR) { + struct iidc_event *event; + + ena_mask &= ~ICE_AUX_CRIT_ERR; + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_CRIT_ERR, event->type); + /* report the entire OICR value to AUX driver */ + event->reg = oicr; + ice_send_event_to_aux(pf, event); + kfree(event); + } } /* Report any remaining unexpected interrupts */ @@ -2734,8 +2747,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* If a critical error is pending there is no choice but to * reset the device. 
*/ - if (oicr & (PFINT_OICR_PE_CRITERR_M | - PFINT_OICR_PCI_EXCEPTION_M | + if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_ECC_ERR_M)) { set_bit(ICE_PFR_REQ, pf->state); ice_service_task_schedule(pf); @@ -3290,6 +3302,12 @@ static void ice_set_pf_caps(struct ice_pf *pf) { struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + clear_bit(ICE_FLAG_AUX_ENA, pf->flags); + if (func_caps->common_cap.rdma) { + set_bit(ICE_FLAG_RDMA_ENA, pf->flags); + set_bit(ICE_FLAG_AUX_ENA, pf->flags); + } clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); if (func_caps->common_cap.dcb) set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); @@ -3369,11 +3387,12 @@ static int ice_init_pf(struct ice_pf *pf) */ static int ice_ena_msix_range(struct ice_pf *pf) { - int v_left, v_actual, v_other, v_budget = 0; + int num_cpus, v_left, v_actual, v_other, v_budget = 0; struct device *dev = ice_pf_to_dev(pf); int needed, err, i; v_left = pf->hw.func_caps.common_cap.num_msix_vectors; + num_cpus = num_online_cpus(); /* reserve for LAN miscellaneous handler */ needed = ICE_MIN_LAN_OICR_MSIX; @@ -3395,13 +3414,23 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_other = v_budget; /* reserve vectors for LAN traffic */ - needed = min_t(int, num_online_cpus(), v_left); + needed = num_cpus; if (v_left < needed) goto no_hw_vecs_left_err; pf->num_lan_msix = needed; v_budget += needed; v_left -= needed; + /* reserve vectors for RDMA auxiliary driver */ + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; + if (v_left < needed) + goto no_hw_vecs_left_err; + pf->num_rdma_msix = needed; + v_budget += needed; + v_left -= needed; + } + pf->msix_entries = devm_kcalloc(dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { @@ -3431,16 +3460,46 @@ static int ice_ena_msix_range(struct ice_pf *pf) err = -ERANGE; goto msix_err; } else { - int v_traffic = v_actual - v_other; + int v_remain = v_actual - v_other; + int v_rdma = 0, v_min_rdma = 0; + + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + /* Need at least 1 interrupt in addition to + * AEQ MSIX + */ + v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; + v_min_rdma = ICE_MIN_RDMA_MSIX; + } if (v_actual == ICE_MIN_MSIX || - v_traffic < ICE_MIN_LAN_TXRX_MSIX) + v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { + dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + + pf->num_rdma_msix = 0; pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - else - pf->num_lan_msix = v_traffic; + } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || + (v_remain - v_rdma < v_rdma)) { + /* Support minimum RDMA and give remaining + * vectors to LAN MSIX + */ + pf->num_rdma_msix = v_min_rdma; + pf->num_lan_msix = v_remain - v_min_rdma; + } else { + /* Split remaining MSIX with RDMA after + * accounting for AEQ MSIX + */ + pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + + ICE_RDMA_NUM_AEQ_MSIX; + pf->num_lan_msix = v_remain - pf->num_rdma_msix; + } dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", pf->num_lan_msix); + + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", + pf->num_rdma_msix); } } @@ -3455,6 +3514,7 @@ no_hw_vecs_left_err: needed, v_left); err = -ERANGE; exit_err: + pf->num_rdma_msix = 0; pf->num_lan_msix = 0; return err; } @@ -4282,8 +4342,29 @@ probe_done: /* ready to go, so clear down state bit */ clear_bit(ICE_DOWN, pf->state); + if (ice_is_aux_ena(pf)) { + pf->aux_idx = 
ida_alloc(&ice_aux_ida, GFP_KERNEL); + if (pf->aux_idx < 0) { + dev_err(dev, "Failed to allocate device ID for AUX driver\n"); + err = -ENOMEM; + goto err_netdev_reg; + } + + err = ice_init_rdma(pf); + if (err) { + dev_err(dev, "Failed to initialize RDMA: %d\n", err); + err = -EIO; + goto err_init_aux_unroll; + } + } else { + dev_warn(dev, "RDMA is not supported on this device\n"); + } + return 0; +err_init_aux_unroll: + pf->adev = NULL; + ida_free(&ice_aux_ida, pf->aux_idx); err_netdev_reg: err_send_version_unroll: ice_vsi_release_all(pf); @@ -4393,10 +4474,12 @@ static void ice_remove(struct pci_dev *pdev) ice_free_vfs(pf); } - set_bit(ICE_DOWN, pf->state); ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); + ice_unplug_aux_dev(pf); + ida_free(&ice_aux_ida, pf->aux_idx); + set_bit(ICE_DOWN, pf->state); mutex_destroy(&(&pf->hw)->fdir_fltr_lock); ice_deinit_lag(pf); @@ -4552,6 +4635,8 @@ static int __maybe_unused ice_suspend(struct device *dev) */ disabled = ice_service_task_stop(pf); + ice_unplug_aux_dev(pf); + /* Already suspended?, then there is nothing to do */ if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { if (!disabled) @@ -6222,6 +6307,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* if we get here, reset flow is successful */ clear_bit(ICE_RESET_FAILED, pf->state); + + ice_plug_aux_dev(pf); return; err_vsi_rebuild: @@ -6260,7 +6347,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + struct iidc_event *event; u8 count = 0; + int err = 0; if (new_mtu == (int)netdev->mtu) { netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); @@ -6293,27 +6382,38 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return -ENOMEM; + + set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); + netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { - int err; - err = ice_down(vsi); if (err) { netdev_err(netdev, "change MTU if_down err %d\n", err); - return err; + goto event_after; } err = ice_up(vsi); if (err) { netdev_err(netdev, "change MTU if_up err %d\n", err); - return err; + goto event_after; } } netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); - return 0; +event_after: + set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + + return err; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2f097637e405..a17e24e54cf3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -596,6 +596,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) } /** + * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * @tc: TC number + * @new_numqs: number of queues + */ +static enum ice_status +ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) +{ + struct ice_vsi_ctx *vsi_ctx; + struct ice_q_ctx *q_ctx; + + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + /* allocate RDMA queue contexts */ + if (!vsi_ctx->rdma_q_ctx[tc]) { + 
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), + new_numqs, + sizeof(*q_ctx), + GFP_KERNEL); + if (!vsi_ctx->rdma_q_ctx[tc]) + return ICE_ERR_NO_MEMORY; + vsi_ctx->num_rdma_q_entries[tc] = new_numqs; + return 0; + } + /* num queues are increased, update the queue contexts */ + if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) { + u16 prev_num = vsi_ctx->num_rdma_q_entries[tc]; + + q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, + sizeof(*q_ctx), GFP_KERNEL); + if (!q_ctx) + return ICE_ERR_NO_MEMORY; + memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], + prev_num * sizeof(*q_ctx)); + devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); + vsi_ctx->rdma_q_ctx[tc] = q_ctx; + vsi_ctx->num_rdma_q_entries[tc] = new_numqs; + } + return 0; +} + +/** * ice_aq_rl_profile - performs a rate limiting task * @hw: pointer to the HW struct * @opcode: opcode for add, query, or remove profile(s) @@ -1774,13 +1818,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, if (!vsi_ctx) return ICE_ERR_PARAM; - prev_numqs = vsi_ctx->sched.max_lanq[tc]; + if (owner == ICE_SCHED_NODE_OWNER_LAN) + prev_numqs = vsi_ctx->sched.max_lanq[tc]; + else + prev_numqs = vsi_ctx->sched.max_rdmaq[tc]; /* num queues are not changed or less than the previous number */ if (new_numqs <= prev_numqs) return status; - status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); - if (status) - return status; + if (owner == ICE_SCHED_NODE_OWNER_LAN) { + status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); + if (status) + return status; + } else { + status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs); + if (status) + return status; + } if (new_numqs) ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); @@ -1795,7 +1848,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, new_num_nodes, owner); if (status) return status; - vsi_ctx->sched.max_lanq[tc] = new_numqs; + if (owner == ICE_SCHED_NODE_OWNER_LAN) + vsi_ctx->sched.max_lanq[tc] = new_numqs; + else + vsi_ctx->sched.max_rdmaq[tc] = new_numqs; return 0; } @@ -1861,6 +1917,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, * recreate the child nodes all the time in these cases. */ vsi_ctx->sched.max_lanq[tc] = 0; + vsi_ctx->sched.max_rdmaq[tc] = 0; } /* update the VSI child nodes */ @@ -1990,6 +2047,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) } if (owner == ICE_SCHED_NODE_OWNER_LAN) vsi_ctx->sched.max_lanq[i] = 0; + else + vsi_ctx->sched.max_rdmaq[i] = 0; } status = 0; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 357d3073d814..3b6c1420aa7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Intel Corporation. 
*/ +#include "ice_lib.h" #include "ice_switch.h" #define ICE_ETH_DA_OFFSET 0 @@ -302,6 +303,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); vsi->lan_q_ctx[i] = NULL; } + if (vsi->rdma_q_ctx[i]) { + devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]); + vsi->rdma_q_ctx[i] = NULL; + } } } @@ -423,6 +428,29 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, } /** + * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI + * @hw: pointer to HW struct + * @vsi_handle: VSI SW index + * @enable: boolean for enable/disable + */ +int +ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) +{ + struct ice_vsi_ctx *ctx; + + ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!ctx) + return -EIO; + + if (enable) + ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + else + ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + + return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL)); +} + +/** * ice_aq_alloc_free_vsi_list * @hw: pointer to the HW struct * @vsi_list_id: VSI list ID returned or used for lookup diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 8b4f9d35c860..6bb7358ff67b 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -26,6 +26,8 @@ struct ice_vsi_ctx { u8 vf_num; u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS]; struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS]; + u16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS]; + struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS]; }; enum ice_sw_fwd_act_type { @@ -223,6 +225,8 @@ enum ice_status ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); enum ice_status ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int +ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 4474dd6a7ba1..c580b87c76ee 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -45,6 +45,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R) #define ICE_DBG_FLOW BIT_ULL(9) #define ICE_DBG_SW BIT_ULL(13) #define ICE_DBG_SCHED BIT_ULL(14) +#define ICE_DBG_RDMA BIT_ULL(15) #define ICE_DBG_PKG BIT_ULL(16) #define ICE_DBG_RES BIT_ULL(17) #define ICE_DBG_AQ_MSG BIT_ULL(24) @@ -262,6 +263,7 @@ struct ice_hw_common_caps { u8 rss_table_entry_width; /* RSS Entry width in bits */ u8 dcb; + u8 rdma; bool nvm_update_pending_nvm; bool nvm_update_pending_orom; @@ -440,6 +442,7 @@ struct ice_sched_node { u8 tc_num; u8 owner; #define ICE_SCHED_NODE_OWNER_LAN 0 +#define ICE_SCHED_NODE_OWNER_RDMA 2 }; /* Access Macros for Tx Sched Elements data */ @@ -511,6 +514,7 @@ struct ice_sched_vsi_info { struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; struct list_head list_entry; u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; + u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS]; }; /* driver defines the policy */ |
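
The ice_ena_msix_range() hunk above splits whatever MSI-X vectors the PCI core actually grants between LAN traffic and the new RDMA auxiliary driver. The following is a small userspace model of that split for the reduced-vector path; the constants mirror the ones this patch uses (ICE_MIN_LAN_TXRX_MSIX is assumed to be 1, as in ice.h), and the helper name and sample numbers are illustrative only.

/* Standalone model of the LAN/RDMA MSI-X split performed by
 * ice_ena_msix_range() after this patch, for the case where fewer vectors
 * were granted than requested. Not kernel code; compile with any C compiler.
 */
#include <stdio.h>

#define ICE_MIN_LAN_TXRX_MSIX	1	/* assumed, as defined in ice.h */
#define ICE_MIN_LAN_OICR_MSIX	1
#define ICE_MIN_MSIX		(ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_MIN_RDMA_MSIX	2
#define ICE_RDMA_NUM_AEQ_MSIX	4

static void split_vectors(int v_actual, int v_other, int rdma_ena,
			  int *lan, int *rdma)
{
	int v_remain = v_actual - v_other;
	int v_rdma = rdma_ena ? ICE_RDMA_NUM_AEQ_MSIX + 1 : 0;
	int v_min_rdma = rdma_ena ? ICE_MIN_RDMA_MSIX : 0;

	if (v_actual == ICE_MIN_MSIX ||
	    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
		*rdma = 0;			/* too few vectors: RDMA is dropped */
		*lan = ICE_MIN_LAN_TXRX_MSIX;
	} else if (v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma ||
		   v_remain - v_rdma < v_rdma) {
		*rdma = v_min_rdma;		/* minimum RDMA, remainder to LAN */
		*lan = v_remain - v_min_rdma;
	} else {
		/* split the remainder evenly after setting aside the AEQ vectors */
		*rdma = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
			ICE_RDMA_NUM_AEQ_MSIX;
		*lan = v_remain - *rdma;
	}
}

int main(void)
{
	int lan, rdma;

	/* e.g. 24 vectors granted, 3 reserved for OICR/flow-director/"other" */
	split_vectors(24, 3, 1, &lan, &rdma);
	printf("LAN %d, RDMA %d\n", lan, rdma);	/* prints: LAN 9, RDMA 12 */
	return 0;
}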