-rw-r--r--   Documentation/networking/devlink/bnxt.rst          |   2
-rw-r--r--   drivers/net/ethernet/broadcom/bnxt/bnxt.c          |   8
-rw-r--r--   drivers/net/ethernet/broadcom/bnxt/bnxt.h          |   1
-rw-r--r--   drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c |   9
-rw-r--r--   drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c  |  95
-rw-r--r--   drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h  |   2
-rw-r--r--   drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  |  85
-rw-r--r--   drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h      | 143
8 files changed, 279 insertions(+), 66 deletions(-)
diff --git a/Documentation/networking/devlink/bnxt.rst b/Documentation/networking/devlink/bnxt.rst
index a4fb27663cd6..9a8b3d76d11f 100644
--- a/Documentation/networking/devlink/bnxt.rst
+++ b/Documentation/networking/devlink/bnxt.rst
@@ -24,6 +24,8 @@ Parameters
      - Permanent
    * - ``enable_remote_dev_reset``
      - Runtime
+   * - ``enable_roce``
+     - Permanent
 
 The ``bnxt`` driver also implements the following driver-specific
 parameters.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 869bdde09c73..08a41e9c25ff 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -12309,6 +12309,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
         struct hwrm_func_drv_if_change_input *req;
         bool fw_reset = !bp->irq_tbl;
         bool resc_reinit = false;
+        bool caps_change = false;
         int rc, retry = 0;
         u32 flags = 0;
 
@@ -12364,8 +12365,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
                 return -ENODEV;
         }
-        if (resc_reinit || fw_reset) {
-                if (fw_reset) {
+        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+                caps_change = true;
+
+        if (resc_reinit || fw_reset || caps_change) {
+                if (fw_reset || caps_change) {
                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                                 bnxt_ulp_irq_stop(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index e85b5ce94f58..e93ba0e4f087 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2697,6 +2697,7 @@ struct bnxt {
 #define BNXT_DUMP_LIVE		0
 #define BNXT_DUMP_CRASH		1
 #define BNXT_DUMP_DRIVER	2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE	3
 
         struct bpf_prog		*xdp_prog;
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 7236d8e548ab..5576e7cf8463 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -159,8 +159,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
         return rc;
 }
 
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
-                                           u16 segment_id)
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+                                           u16 component_id, u16 segment_id)
 {
         struct hwrm_dbg_coredump_initiate_input *req;
         int rc;
@@ -172,6 +172,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
         hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
         req->component_id = cpu_to_le16(component_id);
         req->segment_id = cpu_to_le16(segment_id);
+        if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+                req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
 
         return hwrm_req_send(bp, req);
 }
@@ -450,7 +452,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
 
                         start = jiffies;
 
-                        rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+                        rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+                                                             seg_id);
                         if (rc) {
                                 netdev_err(bp->dev,
                                            "Failed to initiate coredump for seg = %d\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f8fcc8e0e8de..777880594a04 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -679,6 +679,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
          NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
         {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
          NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+        {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+         BNXT_NVM_FUNC_CFG, 1, 1},
         {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
          BNXT_NVM_SHARED_CFG, 1, 1},
 };
@@ -1023,37 +1025,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
 }
 
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
-                             union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+                               const struct bnxt_dl_nvm_param *nvm, void *msg,
+                               union devlink_param_value *val)
 {
         struct hwrm_nvm_get_variable_input *req = msg;
-        struct bnxt_dl_nvm_param nvm_param;
         struct hwrm_err_output *resp;
         union bnxt_nvm_data *data;
         dma_addr_t data_dma_addr;
-        int idx = 0, rc, i;
-
-        /* Get/Set NVM CFG parameter is supported only on PFs */
-        if (BNXT_VF(bp)) {
-                hwrm_req_drop(bp, req);
-                return -EPERM;
-        }
-
-        for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
-                if (nvm_params[i].id == param_id) {
-                        nvm_param = nvm_params[i];
-                        break;
-                }
-        }
+        int idx = 0, rc;
 
-        if (i == ARRAY_SIZE(nvm_params)) {
-                hwrm_req_drop(bp, req);
-                return -EOPNOTSUPP;
-        }
-
-        if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+        if (nvm->dir_type == BNXT_NVM_PORT_CFG)
                 idx = bp->pf.port_id;
-        else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+        else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
                 idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
 
         data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -1064,23 +1048,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
         }
 
         req->dest_data_addr = cpu_to_le64(data_dma_addr);
-        req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
-        req->option_num = cpu_to_le16(nvm_param.offset);
+        req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+        req->option_num = cpu_to_le16(nvm->offset);
         req->index_0 = cpu_to_le16(idx);
         if (idx)
                 req->dimensions = cpu_to_le16(1);
 
         resp = hwrm_req_hold(bp, req);
         if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
-                bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
-                                      nvm_param.dl_num_bytes);
+                bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+                                      nvm->dl_num_bytes);
                 rc = hwrm_req_send(bp, msg);
         } else {
                 rc = hwrm_req_send_silent(bp, msg);
                 if (!rc) {
                         bnxt_copy_from_nvm_data(val, data,
-                                                nvm_param.nvm_num_bits,
-                                                nvm_param.dl_num_bytes);
+                                                nvm->nvm_num_bits,
+                                                nvm->dl_num_bytes);
                 } else {
                         if (resp->cmd_err ==
                             NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -1093,6 +1077,27 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
         return rc;
 }
 
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+                             union devlink_param_value *val)
+{
+        struct hwrm_nvm_get_variable_input *req = msg;
+        const struct bnxt_dl_nvm_param *nvm_param;
+        int i;
+
+        /* Get/Set NVM CFG parameter is supported only on PFs */
+        if (BNXT_VF(bp)) {
+                hwrm_req_drop(bp, req);
+                return -EPERM;
+        }
+
+        for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+                nvm_param = &nvm_params[i];
+                if (nvm_param->id == param_id)
+                        return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+        }
+        return -EOPNOTSUPP;
+}
+
 static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
                                  struct devlink_param_gset_ctx *ctx)
 {
@@ -1129,6 +1134,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
         return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
 }
 
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+                                 union devlink_param_value val,
+                                 struct netlink_ext_ack *extack)
+{
+        const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+                                                       BNXT_NVM_SHARED_CFG, 1, 1};
+        struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+        struct hwrm_nvm_get_variable_input *req;
+        union devlink_param_value roce_cap;
+        int rc;
+
+        rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+        if (rc)
+                return rc;
+
+        if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+                NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+                return -EINVAL;
+        }
+        if (!roce_cap.vbool) {
+                NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+                return -EINVAL;
+        }
+        return 0;
+}
+
 static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
                                  union devlink_param_value val,
                                  struct netlink_ext_ack *extack)
@@ -1193,6 +1224,10 @@ static const struct devlink_param bnxt_dl_params[] = {
                               BIT(DEVLINK_PARAM_CMODE_PERMANENT),
                               bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
                               bnxt_dl_msix_validate),
+        DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+                              BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+                              bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+                              bnxt_dl_roce_validate),
         DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
                              "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
                              BIT(DEVLINK_PARAM_CMODE_PERMANENT),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b8105065367b..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
 #define NVM_OFF_MSIX_VEC_PER_PF_MAX	108
 #define NVM_OFF_MSIX_VEC_PER_PF_MIN	114
 #define NVM_OFF_IGNORE_ARI		164
+#define NVM_OFF_RDMA_CAPABLE		161
 #define NVM_OFF_DIS_GRE_VER_CHECK	171
 #define NVM_OFF_ENABLE_SRIOV		401
+#define NVM_OFF_SUPPORT_RDMA		506
 #define NVM_OFF_NVM_CFG_VER		602
 
 #define BNXT_NVM_CFG_VER_BITS		8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9c5820839514..48dd5922e4dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -4541,16 +4541,16 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack
         return -EINVAL;
 }
 
-static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
-                                          const struct ethtool_module_eeprom *page_data,
-                                          struct netlink_ext_ack *extack)
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+                                 const struct ethtool_module_eeprom *page_data,
+                                 struct netlink_ext_ack *extack)
 {
-        struct bnxt *bp = netdev_priv(dev);
         int rc;
 
         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
                 NL_SET_ERR_MSG_MOD(extack,
-                                   "Module read not permitted on untrusted VF");
+                                   "Module read/write not permitted on untrusted VF");
                 return -EPERM;
         }
 
@@ -4567,6 +4567,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
                 NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
                 return -EINVAL;
         }
+        return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+                                          const struct ethtool_module_eeprom *page_data,
+                                          struct netlink_ext_ack *extack)
+{
+        struct bnxt *bp = netdev_priv(dev);
+        int rc;
+
+        rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+        if (rc)
+                return rc;
 
         rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
                                               page_data->page, page_data->bank,
@@ -4580,6 +4593,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
         return page_data->length;
 }
 
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+                                             const struct ethtool_module_eeprom *page)
+{
+        struct hwrm_port_phy_i2c_write_input *req;
+        int bytes_written = 0;
+        int rc;
+
+        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+        if (rc)
+                return rc;
+
+        hwrm_req_hold(bp, req);
+        req->i2c_slave_addr = page->i2c_address << 1;
+        req->page_number = cpu_to_le16(page->page);
+        req->bank_number = page->bank;
+        req->port_id = cpu_to_le16(bp->pf.port_id);
+        req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+                                   PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+        while (bytes_written < page->length) {
+                u16 xfer_size;
+
+                xfer_size = min_t(u16, page->length - bytes_written,
+                                  BNXT_MAX_PHY_I2C_RESP_SIZE);
+                req->page_offset = cpu_to_le16(page->offset + bytes_written);
+                req->data_length = xfer_size;
+                memcpy(req->data, page->data + bytes_written, xfer_size);
+                rc = hwrm_req_send(bp, req);
+                if (rc)
+                        break;
+                bytes_written += xfer_size;
+        }
+
+        hwrm_req_drop(bp, req);
+        return rc;
+}
+
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+                                          const struct ethtool_module_eeprom *page_data,
+                                          struct netlink_ext_ack *extack)
+{
+        struct bnxt *bp = netdev_priv(dev);
+        int rc;
+
+        rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+        if (rc)
+                return rc;
+
+        rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+        if (rc) {
+                NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed");
+                return rc;
+        }
+        return page_data->length;
+}
+
 static int bnxt_nway_reset(struct net_device *dev)
 {
         int rc = 0;
@@ -5077,8 +5146,9 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
 {
         struct bnxt *bp = netdev_priv(dev);
 
-        if (dump->flag > BNXT_DUMP_DRIVER) {
-                netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
+        if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+                netdev_info(dev,
+                            "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
                 return -EINVAL;
         }
 
@@ -5441,6 +5511,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
         .get_module_info	= bnxt_get_module_info,
         .get_module_eeprom	= bnxt_get_module_eeprom,
         .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+        .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
         .nway_reset		= bnxt_nway_reset,
         .set_phys_id		= bnxt_set_phys_id,
         .self_test		= bnxt_self_test,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 5f8de1634378..549231703bce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
  * Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2024 Broadcom Inc.
+ * Copyright (c) 2018-2025 Broadcom Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -438,6 +438,7 @@ struct cmd_nums {
 	#define HWRM_MFG_PRVSN_EXPORT_CERT                0x219UL
 	#define HWRM_STAT_DB_ERROR_QSTATS                 0x21aUL
 	#define HWRM_MFG_TESTS                            0x21bUL
+	#define HWRM_MFG_WRITE_CERT_NVM                   0x21cUL
 	#define HWRM_PORT_POE_CFG                         0x230UL
 	#define HWRM_PORT_POE_QCFG                        0x231UL
 	#define HWRM_UDCC_QCAPS                           0x258UL
@@ -514,6 +515,8 @@ struct cmd_nums {
 	#define HWRM_TFC_TBL_SCOPE_CONFIG_GET             0x39aUL
 	#define HWRM_TFC_RESC_USAGE_QUERY                 0x39bUL
 	#define HWRM_TFC_GLOBAL_ID_FREE                   0x39cUL
+	#define HWRM_TFC_TCAM_PRI_UPDATE                  0x39dUL
+	#define HWRM_TFC_HOT_UPGRADE_PROCESS              0x3a0UL
 	#define HWRM_SV                                   0x400UL
 	#define HWRM_DBG_SERDES_TEST                      0xff0eUL
 	#define HWRM_DBG_LOG_BUFFER_FLUSH                 0xff0fUL
@@ -629,8 +632,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 10
 #define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 85
-#define HWRM_VERSION_STR "1.10.3.85"
+#define HWRM_VERSION_RSVD 97
+#define HWRM_VERSION_STR "1.10.3.97"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -1905,11 +1908,15 @@ struct hwrm_func_qcaps_output {
 	__le32	roce_vf_max_srq;
 	__le32	roce_vf_max_gid;
 	__le32	flags_ext3;
-	#define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP     0x1UL
-	#define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER          0x2UL
-	#define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED     0x4UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP        0x1UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER             0x2UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED        0x4UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED            0x10UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED      0x20UL
 	__le16	max_roce_vfs;
-	u8	unused_3[5];
+	__le16	max_crypto_rx_flow_filters;
+	u8	unused_3[3];
 	u8	valid;
 };
 
@@ -1924,7 +1931,7 @@ struct hwrm_func_qcfg_input {
 	u8	unused_0[6];
 };
 
-/* hwrm_func_qcfg_output (size:1280b/160B) */
+/* hwrm_func_qcfg_output (size:1344b/168B) */
 struct hwrm_func_qcfg_output {
 	__le16	error_code;
 	__le16	req_type;
@@ -2087,14 +2094,18 @@ struct hwrm_func_qcfg_output {
 	__le16	host_mtu;
 	__le16	flags2;
 	#define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED     0x1UL
-	u8	unused_4[2];
+	__le16	stag_vid;
 	u8	port_kdnet_mode;
 	#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
 	#define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED  0x1UL
 	#define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST    FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
 	u8	kdnet_pcie_function;
 	__le16	port_kdnet_fid;
-	u8	unused_5[2];
+	u8	unused_5;
+	u8	roce_bidi_opt_mode;
+	#define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED  0x1UL
+	#define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+	#define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED    0x4UL
 	__le32	num_ktls_tx_key_ctxs;
 	__le32	num_ktls_rx_key_ctxs;
 	u8	lag_id;
@@ -2112,7 +2123,8 @@ struct hwrm_func_qcfg_output {
 	__le16	xid_partition_cfg;
 	#define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK     0x1UL
 	#define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK     0x2UL
-	u8	unused_7;
+	__le16	mirror_vnic_id;
+	u8	unused_7[7];
 	u8	valid;
 };
 
@@ -3965,7 +3977,7 @@ struct ts_split_entries {
 	__le32	region_num_entries;
 	u8	tsid;
 	u8	lkup_static_bkt_cnt_exp[2];
-	u8	rsvd;
+	u8	locked;
 	__le32	rsvd2[2];
 };
 
@@ -5483,6 +5495,37 @@ struct hwrm_port_phy_qcaps_output {
 	u8	valid;
 };
 
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	__le32	enables;
+	#define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET     0x1UL
+	#define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER     0x2UL
+	__le16	port_id;
+	u8	i2c_slave_addr;
+	u8	bank_number;
+	__le16	page_number;
+	__le16	page_offset;
+	u8	data_length;
+	u8	unused_1[7];
+	__le32	data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
 /* hwrm_port_phy_i2c_read_input (size:320b/40B) */
 struct hwrm_port_phy_i2c_read_input {
 	__le16	req_type;
@@ -6610,8 +6653,9 @@ struct hwrm_vnic_alloc_input {
 	__le32	flags;
 	#define VNIC_ALLOC_REQ_FLAGS_DEFAULT                  0x1UL
 	#define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID     0x2UL
+	#define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID            0x4UL
 	__le16	virtio_net_fid;
-	u8	unused_0[2];
+	__le16	vnic_id;
 };
 
 /* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -6710,6 +6754,7 @@ struct hwrm_vnic_cfg_input {
 	#define VNIC_CFG_REQ_ENABLES_QUEUE_ID                  0x80UL
 	#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE           0x100UL
 	#define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE               0x200UL
+	#define VNIC_CFG_REQ_ENABLES_RAW_QP_ID                 0x400UL
 	__le16	vnic_id;
 	__le16	dflt_ring_grp;
 	__le16	rss_rule;
@@ -6729,7 +6774,7 @@ struct hwrm_vnic_cfg_input {
 	#define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
 	#define VNIC_CFG_REQ_L2_CQE_MODE_MIXED      0x2UL
 	#define VNIC_CFG_REQ_L2_CQE_MODE_LAST      VNIC_CFG_REQ_L2_CQE_MODE_MIXED
-	u8	unused0[4];
+	__le32	raw_qp_id;
 };
 
 /* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -7082,6 +7127,15 @@ struct hwrm_vnic_plcmodes_cfg_output {
 	u8	valid;
 };
 
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+	u8	code;
+	#define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN               0x0UL
+	#define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+	#define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST                 VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+	u8	unused_0[7];
+};
+
 /* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
 	__le16	req_type;
@@ -7131,15 +7185,16 @@ struct hwrm_ring_alloc_input {
 	__le16	target_id;
 	__le64	resp_addr;
 	__le32	enables;
-	#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG          0x2UL
-	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID     0x8UL
-	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID          0x20UL
-	#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID      0x40UL
-	#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID      0x80UL
-	#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID     0x100UL
-	#define RING_ALLOC_REQ_ENABLES_SCHQ_ID               0x200UL
-	#define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE        0x400UL
-	#define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID    0x800UL
+	#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG            0x2UL
+	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID       0x8UL
+	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID            0x20UL
+	#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID        0x40UL
+	#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID        0x80UL
+	#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID       0x100UL
+	#define RING_ALLOC_REQ_ENABLES_SCHQ_ID                 0x200UL
+	#define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE          0x400UL
+	#define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID      0x800UL
+	#define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID   0x1000UL
 	u8	ring_type;
 	#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
 	#define RING_ALLOC_REQ_RING_TYPE_TX      0x1UL
@@ -7226,7 +7281,11 @@ struct hwrm_ring_alloc_input {
 	#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA   0x3UL
 	#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE  0x4UL
 	#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST    RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
-	u8	unused_4[2];
+	u8	rx_rate_profile_sel;
+	#define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT   0x0UL
+	#define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+	#define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST     RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+	u8	unused_4;
 	__le64	cq_handle;
 };
 
@@ -9122,6 +9181,39 @@ struct pcie_ctx_hw_stats {
 	__le64	pcie_recovery_histogram;
 };
 
+/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+struct pcie_ctx_hw_stats_v2 {
+	__le64	pcie_pl_signal_integrity;
+	__le64	pcie_dl_signal_integrity;
+	__le64	pcie_tl_signal_integrity;
+	__le64	pcie_link_integrity;
+	__le64	pcie_tx_traffic_rate;
+	__le64	pcie_rx_traffic_rate;
+	__le64	pcie_tx_dllp_statistics;
+	__le64	pcie_rx_dllp_statistics;
+	__le64	pcie_equalization_time;
+	__le32	pcie_ltssm_histogram[4];
+	__le64	pcie_recovery_histogram;
+	__le32	pcie_tl_credit_nph_histogram[8];
+	__le32	pcie_tl_credit_ph_histogram[8];
+	__le32	pcie_tl_credit_pd_histogram[8];
+	__le32	pcie_cmpl_latest_times[4];
+	__le32	pcie_cmpl_longest_time;
+	__le32	pcie_cmpl_shortest_time;
+	__le32	unused_0[2];
+	__le32	pcie_cmpl_latest_headers[4][4];
+	__le32	pcie_cmpl_longest_headers[4][4];
+	__le32	pcie_cmpl_shortest_headers[4][4];
+	__le32	pcie_wr_latency_histogram[12];
+	__le32	pcie_wr_latency_all_normal_count;
+	__le32	unused_1;
+	__le64	pcie_posted_packet_count;
+	__le64	pcie_non_posted_packet_count;
+	__le64	pcie_other_packet_count;
+	__le64	pcie_blocked_packet_count;
+	__le64	pcie_cmpl_packet_count;
+};
+
 /* hwrm_stat_generic_qstats_input (size:256b/32B) */
 struct hwrm_stat_generic_qstats_input {
 	__le16	req_type;
@@ -9317,6 +9409,9 @@ struct hwrm_struct_hdr {
 	#define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
 	__le16	len;
 	u8	version;
+	#define STRUCT_HDR_VERSION_0 0x0UL
+	#define STRUCT_HDR_VERSION_1 0x1UL
+	#define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
 	u8	count;
 	__le16	subtype;
 	__le16	next_offset;