Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_common.c')
 drivers/net/ethernet/intel/ice/ice_common.c | 371 ++++++++++++++++++++------
 1 file changed, 298 insertions(+), 73 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2e0731c1e1a3..3a6b3950eb0e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -263,21 +263,23 @@ enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
- struct ice_link_status *hw_link_info_old, *hw_link_info;
struct ice_aqc_get_link_status_data link_data = { 0 };
struct ice_aqc_get_link_status *resp;
+ struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
enum ice_status status;
+ struct ice_hw *hw;
u16 cmd_flags;
if (!pi)
return ICE_ERR_PARAM;
- hw_link_info_old = &pi->phy.link_info_old;
+ hw = pi->hw;
+ li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
- hw_link_info = &pi->phy.link_info;
+ li = &pi->phy.link_info;
hw_fc_info = &pi->fc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
@@ -286,27 +288,27 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
- status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
- cd);
+ status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
if (status)
return status;
/* save off old link status information */
- *hw_link_info_old = *hw_link_info;
+ *li_old = *li;
/* update current link status information */
- hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
- hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
- hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
- hw_link_info->link_info = link_data.link_info;
- hw_link_info->an_info = link_data.an_info;
- hw_link_info->ext_info = link_data.ext_info;
- hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
- hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
- hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
- hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
+ li->link_info = link_data.link_info;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
+ ICE_AQ_CFG_PACING_TYPE_M);
/* update fc info */
tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
@@ -320,12 +322,24 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
else
hw_fc_info->current_mode = ICE_FC_NONE;
- hw_link_info->lse_ena =
- !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+ li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+
+ ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)li->phy_type_low);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)li->phy_type_high);
+ ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
/* save link status information */
if (link)
- *link = *hw_link_info;
+ *link = *li;
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
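[Editor's note] For orientation, a minimal caller sketch (not part of this patch; the helper name is hypothetical) showing how the cached link info and the get_link_info flag are meant to be used: the AQ is only hit when the cache has been invalidated.

static enum ice_status
ice_example_link_speed(struct ice_port_info *pi, u16 *speed)
{
	enum ice_status status = 0;

	/* only query FW if the cached link info was marked stale */
	if (pi->phy.get_link_info)
		status = ice_aq_get_link_info(pi, false, NULL, NULL);

	if (!status)
		*speed = pi->phy.link_info.link_speed;

	return status;
}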
@@ -715,6 +729,29 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_get_nvm_version - get cached NVM version data
+ * @hw: pointer to the hardware structure
+ * @oem_ver: 8 bit NVM version
+ * @oem_build: 16 bit NVM build number
+ * @oem_patch: 8 bit NVM patch number
+ * @ver_hi: high part of the 16 bit NVM version
+ * @ver_lo: low part of the 16 bit NVM version
+ */
+void
+ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
+ u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
+{
+ struct ice_nvm_info *nvm = &hw->nvm;
+
+ *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
+ *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
+ *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
+ ICE_OEM_VER_BUILD_SHIFT);
+ *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+}
+
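[Editor's note] A minimal usage sketch for the new helper, assuming a caller-supplied buffer and the cached EETRACK id in hw->nvm; the format string is illustrative only, not the driver's official version-string helper.

	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	u16 oem_build;
	char buf[32];

	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
			    &ver_hi, &ver_lo);
	/* illustrative formatting of the cached NVM/OEM version data */
	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);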
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -740,7 +777,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_get_itr_intrl_gran(hw);
- status = ice_init_all_ctrlq(hw);
+ status = ice_create_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
@@ -845,7 +882,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
-
+ status = ice_init_hw_tbls(hw);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
return 0;
err_unroll_fltr_mgmt_struct:
@@ -855,7 +894,7 @@ err_unroll_sched:
err_unroll_alloc:
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
return status;
}
@@ -873,6 +912,8 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
+ ice_free_seg(hw);
+ ice_free_hw_tbls(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -881,7 +922,7 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
ice_clear_all_vsi_ctx(hw);
@@ -1078,6 +1119,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+ ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
{ 0 }
};
@@ -1088,7 +1130,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to HW register space
+ * it to HW register space and enables the hardware to prefetch descriptors
+ * instead of only fetching them on demand
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1096,6 +1139,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ if (!rlan_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ rlan_ctx->prefena = 1;
+
ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
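[Editor's note] A hedged illustration of how a caller interacts with the new NULL check and forced prefetch enable; the function name and field values are placeholders, and only a couple of Rx context fields are shown.

static enum ice_status
ice_example_setup_rxq(struct ice_hw *hw, dma_addr_t ring_dma, u16 ring_count,
		      u32 rxq_index)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_dma >> 7;	/* queue base is in 128-byte units */
	rlan_ctx.qlen = ring_count;

	/* prefena no longer needs to be set by the caller; it is forced on
	 * inside ice_write_rxq_ctx() as of this patch
	 */
	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}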
@@ -1111,6 +1159,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
+ ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
@@ -1129,7 +1178,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171),
+ ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
{ 0 }
};
@@ -1185,6 +1234,12 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
/* FW Admin Queue command wrappers */
+/* Software lock/mutex that is meant to be held while the Global Config Lock
+ * in firmware is acquired by the software to prevent most (but not all) types
+ * of AQ commands from being sent to FW
+ */
+DEFINE_MUTEX(ice_global_cfg_lock_sw);
+
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the HW struct
@@ -1199,7 +1254,38 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+ struct ice_aqc_req_res *cmd = &desc->params.res_owner;
+ bool lock_acquired = false;
+ enum ice_status status;
+
+ /* When a package download is in process (i.e. when the firmware's
+ * Global Configuration Lock resource is held), only the Download
+ * Package, Get Version, Get Package Info List and Release Resource
+ * (with resource ID set to Global Config Lock) AdminQ commands are
+ * allowed; all others must block until the package download completes
+ * and the Global Config Lock is released. See also
+ * ice_acquire_global_cfg_lock().
+ */
+ switch (le16_to_cpu(desc->opcode)) {
+ case ice_aqc_opc_download_pkg:
+ case ice_aqc_opc_get_pkg_info_list:
+ case ice_aqc_opc_get_ver:
+ break;
+ case ice_aqc_opc_release_res:
+ if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
+ break;
+ /* fall-through */
+ default:
+ mutex_lock(&ice_global_cfg_lock_sw);
+ lock_acquired = true;
+ break;
+ }
+
+ status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+ if (lock_acquired)
+ mutex_unlock(&ice_global_cfg_lock_sw);
+
+ return status;
}
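[Editor's note] The other side of this serialization lives in the package-download path. A rough sketch of the intended pairing, assuming the ice_acquire_global_cfg_lock()/ice_release_global_cfg_lock() helpers referenced in the comment above (their exact signatures are not shown in this diff):

	enum ice_status status;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (!status) {
		/* issue the Download Package AQ commands here; they use the
		 * exempt opcodes listed above and are not blocked by the
		 * ice_global_cfg_lock_sw mutex
		 */
		ice_release_global_cfg_lock(hw);
	}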
/**
@@ -1237,6 +1323,43 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
}
/**
+ * ice_aq_send_driver_ver
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ * @cd: pointer to command details structure or NULL
+ *
+ * Send the driver version (0x0002) to the firmware
+ */
+enum ice_status
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_ver *cmd;
+ struct ice_aq_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ isascii(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
+}
+
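[Editor's note] A hedged caller sketch for the new Send Driver Version (0x0002) command; the version numbers and string below are placeholders, and struct ice_driver_ver is assumed to carry exactly the fields copied above.

static void ice_example_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv = { 0 };

	dv.major_ver = 0;	/* placeholder version numbers */
	dv.minor_ver = 8;
	dv.build_ver = 1;
	dv.subbuild_ver = 0;
	strscpy((char *)dv.driver_string, "example driver build",
		sizeof(dv.driver_string));

	/* failure is non-fatal; FW simply will not know the driver version */
	if (ice_aq_send_driver_ver(hw, &dv, NULL))
		ice_debug(hw, ICE_DBG_INIT, "sending driver version failed\n");
}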
+/**
* ice_aq_q_shutdown
* @hw: pointer to the HW struct
* @unloading: is the driver unloading itself
@@ -1254,7 +1377,7 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
if (unloading)
- cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
+ cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -1529,29 +1652,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
case ICE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT,
- "%s: valid functions = %d\n", prefix,
+ "%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
- "%s: SR-IOV = %d\n", prefix,
+ "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_VF:
if (dev_p) {
dev_p->num_vfs_exposed = number;
ice_debug(hw, ICE_DBG_INIT,
- "%s: VFs exposed = %d\n", prefix,
+ "%s: num_vfs_exposed = %d\n", prefix,
dev_p->num_vfs_exposed);
} else if (func_p) {
func_p->num_allocd_vfs = number;
func_p->vf_base_id = logical_id;
ice_debug(hw, ICE_DBG_INIT,
- "%s: VFs allocated = %d\n", prefix,
+ "%s: num_allocd_vfs = %d\n", prefix,
func_p->num_allocd_vfs);
ice_debug(hw, ICE_DBG_INIT,
- "%s: VF base_id = %d\n", prefix,
+ "%s: vf_base_id = %d\n", prefix,
func_p->vf_base_id);
}
break;
@@ -1559,63 +1682,75 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
ice_debug(hw, ICE_DBG_INIT,
- "%s: num VSI alloc to host = %d\n",
+ "%s: num_vsi_allocd_to_host = %d\n",
prefix,
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
func_p->guar_num_vsi =
ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
- "%s: num guaranteed VSI (fw) = %d\n",
+ "%s: guar_num_vsi (fw) = %d\n",
prefix, number);
ice_debug(hw, ICE_DBG_INIT,
- "%s: num guaranteed VSI = %d\n",
+ "%s: guar_num_vsi = %d\n",
prefix, func_p->guar_num_vsi);
}
break;
+ case ICE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: dcb = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: active_tc_bitmap = %d\n", prefix,
+ caps->active_tc_bitmap);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d\n", prefix, caps->maxtc);
+ break;
case ICE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
ice_debug(hw, ICE_DBG_INIT,
- "%s: RSS table size = %d\n", prefix,
+ "%s: rss_table_size = %d\n", prefix,
caps->rss_table_size);
ice_debug(hw, ICE_DBG_INIT,
- "%s: RSS table width = %d\n", prefix,
+ "%s: rss_table_entry_width = %d\n", prefix,
caps->rss_table_entry_width);
break;
case ICE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "%s: num Rx queues = %d\n", prefix,
+ "%s: num_rxq = %d\n", prefix,
caps->num_rxq);
ice_debug(hw, ICE_DBG_INIT,
- "%s: Rx first queue ID = %d\n", prefix,
+ "%s: rxq_first_id = %d\n", prefix,
caps->rxq_first_id);
break;
case ICE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "%s: num Tx queues = %d\n", prefix,
+ "%s: num_txq = %d\n", prefix,
caps->num_txq);
ice_debug(hw, ICE_DBG_INIT,
- "%s: Tx first queue ID = %d\n", prefix,
+ "%s: txq_first_id = %d\n", prefix,
caps->txq_first_id);
break;
case ICE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "%s: MSIX vector count = %d\n", prefix,
+ "%s: num_msix_vectors = %d\n", prefix,
caps->num_msix_vectors);
ice_debug(hw, ICE_DBG_INIT,
- "%s: MSIX first vector index = %d\n", prefix,
+ "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
default:
@@ -1712,6 +1847,75 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
}
/**
+ * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
+ * @hw: pointer to the hardware structure
+ */
+void ice_set_safe_mode_caps(struct ice_hw *hw)
+{
+ struct ice_hw_func_caps *func_caps = &hw->func_caps;
+ struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
+ u32 valid_func, rxq_first_id, txq_first_id;
+ u32 msix_vector_first_id, max_mtu;
+ u32 num_func = 0;
+ u8 i;
+
+ /* cache some func_caps values that should be restored after memset */
+ valid_func = func_caps->common_cap.valid_functions;
+ txq_first_id = func_caps->common_cap.txq_first_id;
+ rxq_first_id = func_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
+ max_mtu = func_caps->common_cap.max_mtu;
+
+ /* unset func capabilities */
+ memset(func_caps, 0, sizeof(*func_caps));
+
+ /* restore cached values */
+ func_caps->common_cap.valid_functions = valid_func;
+ func_caps->common_cap.txq_first_id = txq_first_id;
+ func_caps->common_cap.rxq_first_id = rxq_first_id;
+ func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ func_caps->common_cap.max_mtu = max_mtu;
+
+ /* one Tx and one Rx queue in safe mode */
+ func_caps->common_cap.num_rxq = 1;
+ func_caps->common_cap.num_txq = 1;
+
+ /* two MSIX vectors, one for traffic and one for misc causes */
+ func_caps->common_cap.num_msix_vectors = 2;
+ func_caps->guar_num_vsi = 1;
+
+ /* cache some dev_caps values that should be restored after memset */
+ valid_func = dev_caps->common_cap.valid_functions;
+ txq_first_id = dev_caps->common_cap.txq_first_id;
+ rxq_first_id = dev_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
+ max_mtu = dev_caps->common_cap.max_mtu;
+
+ /* unset dev capabilities */
+ memset(dev_caps, 0, sizeof(*dev_caps));
+
+ /* restore cached values */
+ dev_caps->common_cap.valid_functions = valid_func;
+ dev_caps->common_cap.txq_first_id = txq_first_id;
+ dev_caps->common_cap.rxq_first_id = rxq_first_id;
+ dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ dev_caps->common_cap.max_mtu = max_mtu;
+
+ /* valid_func is a bitmap. get number of functions */
+#define ICE_MAX_FUNCS 8
+ for (i = 0; i < ICE_MAX_FUNCS; i++)
+ if (valid_func & BIT(i))
+ num_func++;
+
+ /* one Tx and one Rx queue per function in safe mode */
+ dev_caps->common_cap.num_rxq = num_func;
+ dev_caps->common_cap.num_txq = num_func;
+
+ /* two MSIX vectors per function */
+ dev_caps->common_cap.num_msix_vectors = 2 * num_func;
+}
+
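[Editor's note] Side note, not part of the patch: since valid_func is a bitmap of at most ICE_MAX_FUNCS (8) functions, the counting loop above could equivalently use the kernel's population-count helper, assuming <linux/bitops.h> is available in this translation unit.

	/* same result as the ICE_MAX_FUNCS loop; only the low 8 bits of
	 * valid_func are meaningful
	 */
	num_func = hweight8(valid_func);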
+/**
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
@@ -1993,6 +2197,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cfg->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cfg->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
+ cfg->low_power_ctrl);
+ ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+
return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}
@@ -2024,7 +2239,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
if (!status)
memcpy(li->module_type, &pcaps->module_type,
@@ -2174,27 +2389,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
switch (fec) {
case ICE_FEC_BASER:
- /* Clear auto FEC and RS bits, and AND BASE-R ability
+ /* Clear RS bits, and AND BASE-R ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
ICE_AQC_PHY_FEC_25G_KR_REQ;
break;
case ICE_FEC_RS:
- /* Clear auto FEC and BASE-R bits, and AND RS ability
+ /* Clear BASE-R bits, and AND RS ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
ICE_AQC_PHY_FEC_25G_RS_544_REQ;
break;
case ICE_FEC_NONE:
- /* Clear auto FEC and all FEC option bits. */
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ /* Clear all FEC option bits. */
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
case ICE_FEC_AUTO:
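[Editor's note] For orientation, a hedged fragment showing how ice_cfg_phy_fec() is typically consumed alongside ice_aq_set_phy_cfg(); pcaps, pi, hw and status are assumed caller locals, and the Get PHY Capabilities field names are assumptions based on the pattern elsewhere in this file.

	struct ice_aqc_set_phy_cfg_data cfg = { 0 };

	/* start from the abilities reported by Get PHY Capabilities (pcaps),
	 * apply the requested FEC mode, then push the result to firmware
	 */
	cfg.phy_type_low = pcaps->phy_type_low;
	cfg.phy_type_high = pcaps->phy_type_high;
	cfg.caps = pcaps->caps;
	cfg.link_fec_opt = pcaps->link_fec_options;

	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
	status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);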
@@ -3240,40 +3452,44 @@ void ice_replay_post(struct ice_hw *hw)
/**
* ice_stat_update40 - read 40 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
+ * @reg: offset of 64 bit HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
- u64 new_data;
-
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
- *cur_stat &= 0xFFFFFFFFFFULL;
+ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
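[Editor's note] A hedged sketch of the new single-register call pattern; GLPRT_GORCL() is a real port counter register, but the prev/cur bookkeeping names below are placeholders for whatever structures the caller maintains.

	/* accumulate Rx good octets for the port; prev_stats/cur_stats live
	 * in the caller's stats bookkeeping
	 */
	ice_stat_update40(hw, GLPRT_GORCL(port), prev_loaded,
			  &prev_stats->rx_bytes, &cur_stats->rx_bytes);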
/**
* ice_stat_update32 - read 32 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @reg: HW register to read from
+ * @reg: offset of HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
@@ -3287,17 +3503,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
new_data = rd32(hw, reg);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
/**