Diffstat
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c	1412
1 file changed, 499 insertions(+), 913 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2a58101144e..24f7afacae02 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -24,6 +24,7 @@
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
+#include "hclge_comm_cmd.h"
#define HCLGE_NAME "hclge"
@@ -90,20 +91,20 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CSQ_TAIL_REG,
- HCLGE_NIC_CSQ_HEAD_REG,
- HCLGE_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_NIC_CRQ_DEPTH_REG,
- HCLGE_NIC_CRQ_TAIL_REG,
- HCLGE_NIC_CRQ_HEAD_REG,
- HCLGE_VECTOR0_CMDQ_SRC_REG,
- HCLGE_CMDQ_INTR_STS_REG,
- HCLGE_CMDQ_INTR_EN_REG,
- HCLGE_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
HCLGE_PF_OTHER_INT_REG,
@@ -370,14 +371,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u8 hclge_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
static const u32 hclge_dfx_bd_offset_list[] = {
HCLGE_DFX_BIOS_BD_OFFSET,
HCLGE_DFX_SSU_0_BD_OFFSET,
@@ -478,6 +471,20 @@ static const struct key_info tuple_key_info[] = {
offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
+/**
+ * hclge_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue; it posts the
+ * descriptors to the firmware command queue and cleans the completed ones.
+ **/
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
+{
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
+}
+
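The added hclge_cmd_send() is deliberately a one-line wrapper around the shared hclge_comm_cmd_send(), so the many existing PF-side callers keep working unchanged while the real implementation moves to the common module. A minimal sketch of a typical caller; hclge_query_foo() and HCLGE_OPC_QUERY_FOO are hypothetical names, only the setup/send/parse pattern is taken from this file:

/* Hedged sketch - hclge_query_foo() and HCLGE_OPC_QUERY_FOO are
 * hypothetical; the setup/send/parse flow mirrors callers in this file.
 */
static int hclge_query_foo(struct hclge_dev *hdev, u32 *val)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FOO, true);

	/* the wrapper forwards to hclge_comm_cmd_send(&hdev->hw.hw, ...) */
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	*val = le32_to_cpu(desc.data[0]);
	return 0;
}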
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -604,111 +611,6 @@ int hclge_mac_update_stats(struct hclge_dev *hdev)
return hclge_mac_update_stats_defective(hdev);
}
-static int hclge_tqps_update_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hnae3_queue *queue;
- struct hclge_desc desc[1];
- struct hclge_tqp *tqp;
- int ret, i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- return 0;
-}
-
-static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
-}
-
-static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-
- /* each tqp has TX & RX two queues */
- return kinfo->num_tqps * (2);
-}
-
-static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- return buff;
-}
-
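The four deleted per-PF tqp statistics helpers are superseded by shared hclge_comm_tqps_*() equivalents (also usable by the VF driver); the later hunks switch every call site over. A hedged sketch of an ethtool-style path built on the shared helpers, using only the signatures visible at those call sites; example_fill_tqp_stats() itself is hypothetical:

/* Hedged sketch - example_fill_tqp_stats() is hypothetical; the
 * hclge_comm_tqps_*() signatures are taken from the call sites below.
 */
static int example_fill_tqp_stats(struct hclge_dev *hdev,
				  struct hnae3_handle *handle,
				  u8 *names, u64 *stats)
{
	int cnt = hclge_comm_tqps_get_sset_count(handle);	/* tx + rx queues */
	int ret;

	ret = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (ret)
		return ret;

	/* both helpers return a pointer advanced past what they wrote */
	names = hclge_comm_tqps_get_strings(handle, names);
	stats = hclge_comm_tqps_get_stats(handle, stats);
	return cnt;
}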
static int hclge_comm_get_count(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
u32 size)
@@ -769,7 +671,7 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
if (handle->client) {
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status) {
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -799,7 +701,7 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -848,7 +750,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
} else if (stringset == ETH_SS_STATS) {
count = hclge_comm_get_count(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string)) +
- hclge_tqps_get_sset_count(handle, stringset);
+ hclge_comm_tqps_get_sset_count(handle);
}
return count;
@@ -866,7 +768,7 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
- p = hclge_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
@@ -900,7 +802,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
- p = hclge_tqps_get_stats(handle, p);
+ p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
@@ -1480,7 +1382,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
- ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
@@ -1520,7 +1422,7 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
- dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_qset_num)
@@ -1567,7 +1469,7 @@ static int hclge_query_dev_specs(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
@@ -1613,12 +1515,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+ unsigned int i;
+
+ if (hdev->tc_max > HNAE3_MAX_TC ||
+ hdev->tc_max < 1) {
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+	/* Discontiguous tc is currently not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
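hclge_init_tc_config() above is extracted verbatim from hclge_configure() (see the matching removal further down); behaviour is unchanged: clamp tc_max into [1, HNAE3_MAX_TC], force a single TC when DCB is unsupported, and mark the enabled TCs in hw_tc_map. A tiny sketch of the bitmap step, assuming only the hnae3_set_bit() macro used above; example_build_tc_map() is hypothetical:

/* Hedged sketch: build a contiguous TC bitmap the same way
 * hclge_init_tc_config() does; example_build_tc_map() is hypothetical.
 */
static u8 example_build_tc_map(unsigned int num_tc)
{
	u8 hw_tc_map = 0;
	unsigned int i;

	for (i = 0; i < num_tc; i++)
		hnae3_set_bit(hw_tc_map, i, 1);	/* TCs must be contiguous */

	return hw_tc_map;
}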
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
- unsigned int i;
int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
@@ -1662,29 +1591,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
- if ((hdev->tc_max > HNAE3_MAX_TC) ||
- (hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
- hdev->tc_max);
- hdev->tc_max = 1;
- }
-
- /* Dev does not support DCB */
- if (!hnae3_dev_dcb_supported(hdev)) {
- hdev->tc_max = 1;
- hdev->pfc_max = 0;
- } else {
- hdev->pfc_max = hdev->tc_max;
- }
-
- hdev->tm_info.num_tc = 1;
-
- /* Currently not support uncontiuous tc */
- for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae3_set_bit(hdev->hw_tc_map, i, 1);
-
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-
+ hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
/* Set the affinity based on numa node */
@@ -1736,11 +1643,11 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
- struct hclge_tqp *tqp;
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclge_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -1759,11 +1666,11 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
* HCLGE_TQP_MAX_SIZE_DEV_V2
*/
if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
i * HCLGE_TQP_REG_SIZE;
else
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
HCLGE_TQP_EXT_REG_OFFSET +
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
@@ -1864,8 +1771,8 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
kinfo = &nic->kinfo;
for (i = 0; i < vport->alloc_tqps; i++) {
- struct hclge_tqp *q =
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ struct hclge_comm_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
bool is_pf;
int ret;
@@ -1885,7 +1792,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
u16 i, num_vport;
num_vport = hdev->num_req_vfs + 1;
- for (i = 0; i < num_vport; i++) {
+ for (i = 0; i < num_vport; i++) {
int ret;
ret = hclge_map_tqp_to_vport(hdev, vport);
@@ -1907,7 +1814,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
- nic->kinfo.io_base = hdev->hw.io_base;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
@@ -2416,9 +2323,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
@@ -2461,9 +2368,9 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
@@ -2592,8 +2499,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->rinfo.base_vector = hdev->num_nic_msi;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = hdev->hw.io_base;
- roce->rinfo.roce_mem_base = hdev->hw.mem_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2653,11 +2560,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed)
return duplex;
}
+static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
+ {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
+ {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
+ {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
+ {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
+ {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
+ {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
+ {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
+ {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
+};
+
+static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
+ if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
+ *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
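hclge_convert_to_fw_speed() replaces the nine-case switch removed below with a table walk, so supporting a new speed becomes a one-line entry in hclge_mac_speed_map_to_fw[]. A self-contained sketch of the same table-driven pattern in plain C; the names and values are illustrative, not the real firmware encoding:

/* Hedged sketch of the table-driven mapping pattern; names and values
 * are illustrative only.
 */
struct speed_map { unsigned int drv; unsigned int fw; };

static const struct speed_map map[] = {
	{ 10, 0 }, { 100, 1 }, { 1000, 2 }, { 10000, 3 },
};

static int to_fw_speed(unsigned int drv, unsigned int *fw)
{
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i].drv == drv) {
			*fw = map[i].fw;
			return 0;
		}
	}

	return -1;	/* unknown speed; the driver returns -EINVAL */
}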
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
u8 duplex)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
+ u32 speed_fw;
int ret;
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
@@ -2667,48 +2601,14 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
- switch (speed) {
- case HCLGE_MAC_SPEED_10M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
- break;
- case HCLGE_MAC_SPEED_100M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
- break;
- case HCLGE_MAC_SPEED_1G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
- break;
- case HCLGE_MAC_SPEED_10G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
- break;
- case HCLGE_MAC_SPEED_25G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
- break;
- case HCLGE_MAC_SPEED_40G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
- break;
- case HCLGE_MAC_SPEED_50G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
- break;
- case HCLGE_MAC_SPEED_100G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
- break;
- case HCLGE_MAC_SPEED_200G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
- break;
- default:
+ ret = hclge_convert_to_fw_speed(speed, &speed_fw);
+ if (ret) {
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
- return -EINVAL;
+ return ret;
}
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
+ speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
@@ -2933,16 +2833,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_mbx_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_rst_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
@@ -3237,7 +3141,7 @@ static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
true);
@@ -3294,7 +3198,7 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
false);
@@ -3501,7 +3405,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3509,7 +3413,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
@@ -3643,7 +3547,7 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
- vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
hdev->vector_status[0] = 0;
hdev->num_msi_left -= 1;
@@ -3827,10 +3731,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
- test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "mbx service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+ smp_processor_id());
+
hclge_mbx_handler(hdev);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
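Together with the earlier hunk that records hdev->last_mbx_scheduled (and last_rst_scheduled) at queueing time, this adds a simple scheduling-latency watchdog: if the service task runs later than the *_SCHED_TIMEOUT budget, it logs how late and on which CPU. A hedged sketch of the idiom, built only on the stock jiffies helpers used above; the example_* names are hypothetical:

/* Hedged sketch of the schedule-latency watchdog; example_* names are
 * hypothetical, the jiffies helpers are the ones used above.
 */
static void example_check_latency(struct device *dev,
				  unsigned long scheduled_at,
				  unsigned long budget)
{
	/* true once (scheduled_at + budget) lies in the past */
	if (time_is_before_jiffies(scheduled_at + budget))
		dev_warn(dev, "task ran %ums late on cpu%u!\n",
			 jiffies_to_msecs(jiffies - scheduled_at),
			 smp_processor_id());
}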
@@ -3865,7 +3776,7 @@ static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
- hclge_cmd_reuse_desc(&desc, true);
+ hclge_comm_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
@@ -4022,13 +3933,13 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
- reg_val |= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
else
- reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+ reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
@@ -4065,9 +3976,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* After performaning pf reset, it is not necessary to do the
* mailbox handling or send any command to firmware, because
* any mailbox handling or command to firmware is only valid
- * after hclge_cmd_init is called.
+ * after hclge_comm_cmd_init is called.
*/
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
@@ -4480,6 +4391,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_rst_scheduled +
+ HCLGE_RESET_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "reset service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+ smp_processor_id());
+
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
@@ -4614,11 +4532,11 @@ static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
/* need an extend offset to config vector >= 64 */
if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_REG_BASE +
(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
else
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_EXT_REG_BASE +
(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
HCLGE_VECTOR_REG_OFFSET_H +
@@ -4688,334 +4606,43 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
-static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_KEY_SIZE;
-}
-
-static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclge_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclge_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGE_RSS_KEY_SIZE;
- req = (struct hclge_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
- req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
-{
- struct hclge_rss_indirection_table_cmd *req;
- struct hclge_desc desc;
- int rss_cfg_tbl_num;
- u8 rss_msb_oft;
- u8 rss_msb_val;
- int ret;
- u16 qid;
- int i;
- u32 j;
-
- req = (struct hclge_rss_indirection_table_cmd *)desc.data;
- rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
- HCLGE_RSS_CFG_TBL_SIZE;
-
- for (i = 0; i < rss_cfg_tbl_num; i++) {
- hclge_cmd_setup_basic_desc
- (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
-
- req->start_table_index =
- cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
- for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
- qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
- req->rss_qid_l[j] = qid & 0xff;
- rss_msb_oft =
- j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
- rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
- (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
- req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
- }
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure rss indir table fail,status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
- u16 *tc_size, u16 *tc_offset)
-{
- struct hclge_rss_tc_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
- int i;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
- req = (struct hclge_rss_tc_mode_cmd *)desc.data;
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
- tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
- hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss tc mode fail, status = %d\n", ret);
-
- return ret;
-}
-
-static void hclge_get_rss_type(struct hclge_vport *vport)
-{
- if (vport->rss_tuple_sets.ipv4_tcp_en ||
- vport->rss_tuple_sets.ipv4_udp_en ||
- vport->rss_tuple_sets.ipv4_sctp_en ||
- vport->rss_tuple_sets.ipv6_tcp_en ||
- vport->rss_tuple_sets.ipv6_udp_en ||
- vport->rss_tuple_sets.ipv6_sctp_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
- else if (vport->rss_tuple_sets.ipv4_fragment_en ||
- vport->rss_tuple_sets.ipv6_fragment_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
- else
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
-}
-
-static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
-{
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-
- /* Get the tuple cfg from pf */
- req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
- hclge_get_rss_type(&hdev->vport[0]);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
- int i;
+ struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
- /* Get hash algorithm */
- if (hfunc) {
- switch (vport->rss_algo) {
- case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGE_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
-
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
- /* Get indirect table */
- if (indir)
- for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- indir[i] = vport->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
-static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
- u8 *hash_algo)
-{
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- return 0;
- case ETH_RSS_HASH_XOR:
- *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- return 0;
- case ETH_RSS_HASH_NO_CHANGE:
- *hash_algo = vport->rss_algo;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
- ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
if (ret) {
dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
return ret;
}
- /* Set the RSS Hash Key if specififed by the user */
- if (key) {
- ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
- if (ret)
- return ret;
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- } else {
- ret = hclge_set_rss_algo_key(hdev, hash_algo,
- vport->rss_hash_key);
- if (ret)
- return ret;
- }
- vport->rss_algo = hash_algo;
-
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = indir[i];
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
- return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
-}
-
-static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGE_D_PORT_BIT;
- else
- hash_sets &= ~HCLGE_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGE_S_IP_BIT;
- else
- hash_sets &= ~HCLGE_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGE_D_IP_BIT;
- else
- hash_sets &= ~HCLGE_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGE_V_TAG_BIT;
-
- return hash_sets;
-}
-
-static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
- struct ethtool_rxnfc *nfc,
- struct hclge_rss_input_tuple_cmd *req)
-{
- struct hclge_dev *hdev = vport->back;
- u8 tuple_sets;
-
- req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclge_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
- (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
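With the RSS state centralized in hdev->rss_cfg and the logic in the shared hclge_comm helpers, the PF keeps only thin glue plus its shadow indirection table. A hedged sketch of how a caller would repopulate and push that table, mirroring the removed hclge_rss_indir_init_cfg() and the hclge_comm_set_rss_indir_table() signature used above; example_spread_queues() is hypothetical:

/* Hedged sketch - example_spread_queues() is hypothetical; the helper
 * signature matches the call sites in this patch. nr_queues must be > 0.
 */
static int example_spread_queues(struct hclge_dev *hdev, u16 nr_queues)
{
	u16 size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
	u16 *tbl = hdev->rss_cfg.rss_indirection_tbl;
	u16 i;

	for (i = 0; i < size; i++)
		tbl[i] = i % nr_queues;	/* round-robin over the queues */

	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, tbl);
}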
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
@@ -5023,92 +4650,20 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
int ret;
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to init rss tuple cmd, ret = %d\n", ret);
- return ret;
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
+ "failed to set rss tuple, ret = %d.\n", ret);
return ret;
}
- vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- hclge_get_rss_type(vport);
- return 0;
-}
-
-static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
- u8 *tuple_sets)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
+ hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
return 0;
}
-static u64 hclge_convert_rss_tuple(u8 tuple_sets)
-{
- u64 tuple_data = 0;
-
- if (tuple_sets & HCLGE_D_PORT_BIT)
- tuple_data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGE_S_PORT_BIT)
- tuple_data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGE_D_IP_BIT)
- tuple_data |= RXH_IP_DST;
- if (tuple_sets & HCLGE_S_IP_BIT)
- tuple_data |= RXH_IP_SRC;
-
- return tuple_data;
-}
-
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
@@ -5118,11 +4673,12 @@ static int hclge_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
- ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
+ &tuple_sets);
if (ret || !tuple_sets)
return ret;
- nfc->data = hclge_convert_rss_tuple(tuple_sets);
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -5175,78 +4731,35 @@ static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
tc_offset[i] = tc_info->tqp_offset[i];
}
- return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hdev->vport;
- u16 *rss_indir = vport[0].rss_indirection_tbl;
- u8 *key = vport[0].rss_hash_key;
- u8 hfunc = vport[0].rss_algo;
+ u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
+ u8 *key = hdev->rss_cfg.rss_hash_key;
+ u8 hfunc = hdev->rss_cfg.rss_algo;
int ret;
- ret = hclge_set_rss_indir_table(hdev, rss_indir);
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_indir);
if (ret)
return ret;
- ret = hclge_set_rss_algo_key(hdev, hfunc, key);
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
if (ret)
return ret;
- ret = hclge_set_rss_input_tuple(hdev);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
+ &hdev->hw.hw, true,
+ &hdev->rss_cfg);
if (ret)
return ret;
return hclge_init_rss_tc_mode(hdev);
}
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = &hdev->vport[0];
- int i;
-
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
-}
-
-static int hclge_rss_init_cfg(struct hclge_dev *hdev)
-{
- u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- struct hclge_vport *vport = &hdev->vport[0];
- u16 *rss_ind_tbl;
-
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
-
- vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-
- vport->rss_algo = rss_algo;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
-
- vport->rss_indirection_tbl = rss_ind_tbl;
- memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
-
- hclge_rss_indir_init_cfg(hdev);
-
- return 0;
-}
-
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
struct hnae3_ring_chain_node *ring_chain)
@@ -5256,7 +4769,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_desc desc;
struct hclge_ctrl_vector_chain_cmd *req =
(struct hclge_ctrl_vector_chain_cmd *)desc.data;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
int i;
@@ -5886,9 +5399,9 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -6790,7 +6303,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
"Error: vf id (%u) should be less than %u\n",
- vf - 1, hdev->num_req_vfs);
+ vf - 1U, hdev->num_req_vfs);
return -EINVAL;
}
@@ -6800,7 +6313,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
- ring, tqps - 1);
+ ring, tqps - 1U);
return -EINVAL;
}
@@ -7161,6 +6674,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
}
}
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+ u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+}
+
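hclge_fd_get_ring_cookie() packs the rule destination into ethtool's 64-bit ring_cookie: the queue id sits in the low 32 bits and the VF id is shifted up by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF, with RX_CLS_FLOW_DISC reserved for drop rules. A hedged sketch of the reverse decode, using the standard uapi linux/ethtool.h masks; example_decode_ring_cookie() is hypothetical:

/* Hedged sketch: decode a ring_cookie built as above with the
 * standard linux/ethtool.h masks; drop rules must be checked first.
 */
static void example_decode_ring_cookie(u64 ring_cookie, u32 *queue, u8 *vf)
{
	*queue = ring_cookie & ETHTOOL_RX_FLOW_SPEC_RING;
	*vf = (ring_cookie & ETHTOOL_RX_FLOW_SPEC_RING_VF) >>
	      ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
}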
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -7168,7 +6712,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_fd_rule *rule = NULL;
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- struct hlist_node *node2;
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
@@ -7177,14 +6720,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
spin_lock_bh(&hdev->fd_rule_lock);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= fs->location)
- break;
- }
-
- if (!rule || fs->location != rule->location) {
+ rule = hclge_get_fd_rule(hdev, fs->location);
+ if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOENT;
}
@@ -7222,16 +6760,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
hclge_fd_get_ext_info(fs, rule);
- if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
- fs->ring_cookie = RX_CLS_FLOW_DISC;
- } else {
- u64 vf_id;
-
- fs->ring_cookie = rule->queue_id;
- vf_id = rule->vf_id;
- vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
- fs->ring_cookie |= vf_id;
- }
+ hclge_fd_get_ring_cookie(fs, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -7776,7 +7305,7 @@ static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
@@ -7866,7 +7395,7 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
@@ -7960,7 +7489,7 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
/* 3 Config mac work mode with loopback flag
* and its original configure parameters
*/
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -7968,16 +7497,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
- enum hnae3_loop loop_mode)
+static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
-#define HCLGE_COMMON_LB_RETRY_MS 10
-#define HCLGE_COMMON_LB_RETRY_NUM 100
-
struct hclge_common_lb_cmd *req;
struct hclge_desc desc;
- int ret, i = 0;
u8 loop_mode_b;
+ int ret;
req = (struct hclge_common_lb_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
@@ -7994,23 +7520,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
break;
default:
dev_err(&hdev->pdev->dev,
- "unsupported common loopback mode %d\n", loop_mode);
+ "unsupported loopback mode %d\n", loop_mode);
return -ENOTSUPP;
}
- if (en) {
+ req->mask = loop_mode_b;
+ if (en)
req->enable = loop_mode_b;
- req->mask = loop_mode_b;
- } else {
- req->mask = loop_mode_b;
- }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "common loopback set fail, ret = %d\n", ret);
- return ret;
- }
+ "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
+ loop_mode, ret);
+
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
+
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u32 i = 0;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
do {
msleep(HCLGE_COMMON_LB_RETRY_MS);
@@ -8019,20 +7556,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "common loopback get, ret = %d\n", ret);
+ "failed to get loopback done status, ret = %d\n",
+ ret);
return ret;
}
} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
!(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
+ dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
return -EBUSY;
} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
+ dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
return -EIO;
}
- return ret;
+
+ return 0;
+}
+
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback_wait(hdev);
}
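The old monolithic hclge_cfg_common_loopback() is split into a send step and a bounded-poll wait step: up to HCLGE_COMMON_LB_RETRY_NUM tries, 10 ms apart, checking the firmware's DONE and SUCCESS bits. A hedged sketch of that bounded-poll shape with the per-iteration query abstracted into callbacks; the example_* names are hypothetical:

/* Hedged sketch of the bounded poll in hclge_cfg_common_loopback_wait();
 * done()/ok() stand in for re-sending the query command each iteration.
 */
static int example_wait_done(struct hclge_dev *hdev,
			     bool (*done)(struct hclge_dev *hdev),
			     bool (*ok)(struct hclge_dev *hdev))
{
	u32 i = 0;

	do {
		msleep(10);	/* HCLGE_COMMON_LB_RETRY_MS */
	} while (++i < 100 && !done(hdev));	/* HCLGE_COMMON_LB_RETRY_NUM */

	if (!done(hdev))
		return -EBUSY;	/* firmware never signalled completion */

	return ok(hdev) ? 0 : -EIO;	/* completed; pass or fail in fw */
}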
static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
@@ -8213,22 +7764,6 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
HNAE3_LOOP_PARALLEL_SERDES);
}
-static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo;
- struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
- int i;
-
- kinfo = &vport->nic.kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT 100000
@@ -8270,7 +7805,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hdev->hw.mac.link = 0;
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_mac_start_phy(hdev);
@@ -8308,7 +7843,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_mac_stop_phy(hdev);
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_update_link_status(hdev);
}
@@ -8511,14 +8046,14 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
if (is_mc) {
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(desc[0].data,
req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_MAC_VLAN_ADD,
true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2],
HCLGE_OPC_MAC_VLAN_ADD,
true);
@@ -8568,12 +8103,12 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
resp_code,
HCLGE_MAC_VLAN_ADD);
} else {
- hclge_cmd_reuse_desc(&mc_desc[0], false);
- mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[1], false);
- mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[2], false);
- mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
+ mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
+ mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
+ mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(mc_desc[0].data, req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
@@ -8743,6 +8278,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_node *mac_node;
struct list_head *list;
@@ -8767,9 +8303,10 @@ int hclge_update_mac_list(struct hclge_vport *vport,
/* if this address is never added, unnecessary to delete */
if (state == HCLGE_MAC_TO_DEL) {
spin_unlock_bh(&vport->mac_list_lock);
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "failed to delete address %pM from mac list\n",
- addr);
+ "failed to delete address %s from mac list\n",
+ format_mac_addr);
return -ENOENT;
}
@@ -8802,6 +8339,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc;
@@ -8812,9 +8350,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr, is_zero_ether_addr(addr),
+ "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
+ format_mac_addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -8871,6 +8410,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
int ret;
@@ -8879,8 +8419,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
- addr);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
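Across these MAC hunks, raw %pM prints are replaced by hnae3_format_mac_addr() into a HNAE3_FORMAT_MAC_ADDR_LEN stack buffer, so logs carry a partially masked address instead of the full MAC. A hedged sketch of the repeated logging idiom; example_log_bad_mac() is hypothetical:

/* Hedged sketch of the logging idiom the hunks repeat;
 * example_log_bad_mac() is hypothetical.
 */
static void example_log_bad_mac(struct hclge_dev *hdev, const u8 *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];

	hnae3_format_mac_addr(format_mac_addr, addr);
	dev_err(&hdev->pdev->dev, "invalid mac: %s\n", format_mac_addr);
}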
@@ -8911,6 +8452,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
@@ -8919,9 +8461,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Add mc mac err! invalid mac:%pM.\n",
- addr);
+ "Add mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
memset(&req, 0, sizeof(req));
@@ -8973,16 +8516,18 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc[3];
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_dbg(&hdev->pdev->dev,
- "Remove mc mac err! invalid mac:%pM.\n",
- addr);
+ "Remove mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9422,16 +8967,18 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
u8 *mac_addr)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
dev_info(&hdev->pdev->dev,
- "Specified MAC(=%pM) is same as before, no change committed!\n",
- mac_addr);
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
return 0;
}
@@ -9439,13 +8986,13 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
+ "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
+ vf, format_mac_addr);
return hclge_inform_reset_assert_to_vf(vport);
}
- dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
- vf, mac_addr);
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+ vf, format_mac_addr);
return 0;
}
@@ -9549,6 +9096,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
unsigned char *old_addr = NULL;
int ret;
@@ -9557,9 +9105,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
if (is_zero_ether_addr(new_addr) ||
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "change uc mac err! invalid mac: %pM.\n",
- new_addr);
+ "change uc mac err! invalid mac: %s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9577,9 +9126,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
spin_lock_bh(&vport->mac_list_lock);
ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "failed to change the mac addr:%pM, ret = %d\n",
- new_addr, ret);
+ "failed to change the mac addr:%s, ret = %d\n",
+ format_mac_addr, ret);
spin_unlock_bh(&vport->mac_list_lock);
if (!is_first)
@@ -9677,20 +9227,20 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to get vlan filter config, ret = %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
return ret;
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->vlan_fe = filter_en ?
(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
- ret);
+ dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
+ vf_id, ret);
return ret;
}
@@ -9809,7 +9359,7 @@ static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
vf_byte_off = vfid / 8;
vf_byte_val = 1 << (vfid % 8);
@@ -9936,6 +9486,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
return ret;
}
+static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
+ u16 vlan_id, bool is_kill)
+{
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return false;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ return true;
+}
+
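hclge_need_update_port_vlan() also changes semantics, as the next hunk shows: duplicate adds and deletes of absent entries now warn and skip the hardware write instead of failing with -EINVAL. The atomic test_and_set_bit()/test_and_clear_bit() pair both updates the per-vlan vport bitmap and reports whether membership actually changed. A hedged sketch of that idempotent-update idiom; example_membership_changed() is hypothetical:

/* Hedged sketch of the idempotent bitmap update used above: only
 * report a change when the membership bit actually flipped.
 */
static bool example_membership_changed(unsigned long *bitmap, u16 vport_id,
				       bool is_kill)
{
	if (is_kill)
		return test_and_clear_bit(vport_id, bitmap);	/* was set */

	return !test_and_set_bit(vport_id, bitmap);	/* was clear */
}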
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_id, u16 vlan_id,
bool is_kill)
@@ -9957,26 +9533,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
- /* vlan 0 may be added twice when 8021q module is enabled */
- if (!is_kill && !vlan_id &&
- test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
return 0;
- if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %u is already in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
- if (is_kill &&
- !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %u is not in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
@@ -10168,67 +9727,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
return status;
}
-static int hclge_init_vlan_config(struct hclge_dev *hdev)
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
-#define HCLGE_DEF_VLAN_TYPE 0x8100
-
- struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* for revision 0x21, vf vlan filter is per function */
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- ret = hclge_set_vlan_filter_ctrl(hdev,
- HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS,
- true,
- vport->vport_id);
- if (ret)
- return ret;
- vport->cur_vlan_fltr_en = true;
- }
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true,
- 0);
- if (ret)
- return ret;
- } else {
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B,
- true, 0);
+ HCLGE_FILTER_FE_EGRESS, true,
+ vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
- hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true, 0);
+}
- ret = hclge_set_vlan_protocol_type(hdev);
- if (ret)
- return ret;
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- u16 vlan_tag;
- u8 qos;
+ return hclge_set_vlan_protocol_type(hdev);
+}
+
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+ struct hclge_port_base_vlan_config *cfg;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ cfg = &vport->port_base_vlan_cfg;
- ret = hclge_vlan_offload_cfg(vport,
- vport->port_base_vlan_cfg.state,
- vlan_tag, qos);
+ ret = hclge_vlan_offload_cfg(vport, cfg->state,
+ cfg->vlan_info.vlan_tag,
+ cfg->vlan_info.qos);
if (ret)
return ret;
}
+ return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ int ret;
+
+ ret = hclge_init_vlan_filter(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_type(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vport_vlan_offload(hdev);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -10485,12 +10057,41 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
return false;
}
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
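hclge_modify_port_base_vlan_tag() installs the new tag before removing the old one, so there is no window in which neither filter is programmed (a make-before-break update); an old tag of 0 is removed through hclge_set_vf_vlan_common() rather than the generic filter path. The ordering, reduced to a runnable sketch (add_entry/remove_entry are illustrative stand-ins for the hardware filter calls):

    #include <stdio.h>

    static int add_entry(int tag)
    {
        printf("add vlan %d\n", tag);
        return 0;
    }

    static int remove_entry(int tag)
    {
        printf("del vlan %d\n", tag);
        return 0;
    }

    static int modify_tag(int old_tag, int new_tag)
    {
        int ret = add_entry(new_tag);   /* add new VLAN tag first */

        if (ret)
            return ret;
        return remove_entry(old_tag);   /* then drop the old one */
    }

    int main(void)
    {
        return modify_tag(10, 20);
    }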
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
- struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
@@ -10503,38 +10104,12 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
goto out;
- if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
- /* add new VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(vlan_info->vlan_proto),
- vport->vport_id,
- vlan_info->vlan_tag,
- false);
- if (ret)
- return ret;
-
- /* remove old VLAN tag */
- if (old_vlan_info->vlan_tag == 0)
- ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- true, 0);
- else
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to clear vport%u port base vlan %u, ret = %d.\n",
- vport->vport_id, old_vlan_info->vlan_tag, ret);
- return ret;
- }
-
- goto out;
- }
-
- ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
- old_vlan_info);
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
if (ret)
return ret;
@@ -10881,11 +10456,11 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
+ struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
queue = handle->kinfo.tqp[queue_id];
- tqp = container_of(queue, struct hclge_tqp, q);
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
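The queue-to-TQP lookup above is a container_of() cast: given a pointer to the embedded hnae3_queue, recover the enclosing hclge_comm_tqp and read its index. A self-contained illustration of the idiom, with simplified stand-in types:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct queue {
        int id;
    };

    struct tqp {
        unsigned int index;
        struct queue q;     /* embedded, like hnae3_queue in hclge_comm_tqp */
    };

    int main(void)
    {
        struct tqp tqp = { .index = 7, .q = { .id = 1 } };
        struct queue *queue = &tqp.q;
        struct tqp *owner = container_of(queue, struct tqp, q);

        printf("%u\n", owner->index);   /* 7 */
        return 0;
    }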
@@ -11442,10 +11017,11 @@ static int hclge_dev_mem_map(struct hclge_dev *hdev)
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
return 0;
- hw->mem_base = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev, HCLGE_MEM_BAR),
- pci_resource_len(pdev, HCLGE_MEM_BAR));
- if (!hw->mem_base) {
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGE_MEM_BAR),
+ pci_resource_len(pdev, HCLGE_MEM_BAR));
+ if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
return -EFAULT;
}
@@ -11484,8 +11060,8 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->io_base = pcim_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pcim_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM;
goto err_clr_master;
@@ -11500,7 +11076,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
return 0;
err_unmap_io_base:
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
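The err_unmap_io_base/err_clr_master labels follow the kernel's goto-unwind convention: on failure, jump to the label that undoes exactly the steps completed so far, in reverse order. A minimal sketch of the pattern (step names are illustrative):

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
        printf("%s\n", name);
        return fail ? -1 : 0;
    }

    int main(void)
    {
        int ret;

        ret = step("map io base", 0);
        if (ret)
            goto err_out;
        ret = step("init msi", 1);      /* pretend this step fails */
        if (ret)
            goto err_unmap_io_base;
        return 0;

    err_unmap_io_base:
        printf("unmap io base\n");      /* undo in reverse order */
    err_out:
        return ret;
    }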
@@ -11514,10 +11090,10 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->hw.mem_base)
- devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
@@ -11556,29 +11132,25 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = rst_type;
- ret = hclge_reset_prepare(hdev);
- if (ret || hdev->reset_pending) {
- dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGE_RESET_RETRY_WAIT_MS);
- goto retry;
- }
+ while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclge_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset done */
hclge_enable_vector(&hdev->misc_vector, false);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
if (hdev->reset_type == HNAE3_FLR_RESET)
hdev->rst_stats.flr_rst_cnt++;
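The rewrite above replaces the goto-based retry with a bounded loop: at most HCLGE_RESET_RETRY_CNT attempts, releasing the semaphore and state bit and sleeping between tries, and breaking out on success while still holding them. The control flow, reduced to a runnable sketch (the constant and the prepare step are stand-ins):

    #include <stdio.h>

    #define RESET_RETRY_CNT 3

    /* Stand-in for hclge_reset_prepare(): fails twice, then succeeds. */
    static int reset_prepare(void)
    {
        static int calls;

        return ++calls < 3 ? -1 : 0;
    }

    int main(void)
    {
        int retry_cnt = 0;
        int ret = -1;

        while (retry_cnt++ < RESET_RETRY_CNT) {
            /* take reset_sem, set RST_HANDLING ... */
            ret = reset_prepare();
            if (!ret)
                break;  /* prepared successfully, keep the lock */

            fprintf(stderr, "failed to prepare, retry_cnt:%d\n", retry_cnt);
            /* clear RST_HANDLING, release reset_sem, msleep(...) */
        }

        return ret;
    }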
@@ -11683,12 +11255,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_pci_uninit;
/* Firmware command queue initialize */
- ret = hclge_cmd_queue_init(hdev);
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_devlink_uninit;
/* Firmware command initialize */
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret)
goto err_cmd_uninit;
@@ -11776,7 +11349,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_mdiobus_unreg;
@@ -11861,11 +11435,11 @@ err_msi_irq_uninit:
err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
hclge_devlink_uninit(hdev);
err_pci_uninit:
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -12112,7 +11686,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_umv_space(hdev);
}
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed\n");
return ret;
@@ -12252,7 +11827,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_config_nic_hw_error(hdev, false);
hclge_config_rocee_ras_interrupt(hdev, false);
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -12288,19 +11863,43 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->pf_rss_size_max;
}
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ unsigned int i;
+
+ roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = vport->nic.kinfo.rss_size * i;
+ }
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
+}
+
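hclge_set_rss_tc_mode_cfg() encodes each TC's queue count as log2 of rss_size rounded up to a power of two, with per-TC offsets spaced rss_size apart. Worked through for a hypothetical rss_size of 24 (portable substitutes for the kernel's roundup_pow_of_two/ilog2 helpers):

    #include <stdio.h>

    /* Round v up to the next power of two (v >= 1). */
    static unsigned int roundup_pow_of_two(unsigned int v)
    {
        unsigned int p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    static unsigned int ilog2(unsigned int v)
    {
        unsigned int l = 0;

        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned int rss_size = 24; /* hypothetical per-TC queue count */
        unsigned int roundup_size = ilog2(roundup_pow_of_two(rss_size));
        unsigned int i;

        for (i = 0; i < 4; i++)     /* pretend 4 TCs are enabled */
            printf("tc%u: size=%u offset=%u\n",
                   i, roundup_size, rss_size * i);
        return 0;   /* 24 -> 32 -> log2 = 5, offsets 0/24/48/72 */
    }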
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
- u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
- u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 roundup_size;
u32 *rss_indir;
unsigned int i;
int ret;
@@ -12313,20 +11912,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
return ret;
}
- roundup_size = roundup_pow_of_two(kinfo->rss_size);
- roundup_size = ilog2(roundup_size);
- /* Set the RSS TC mode according to the new RSS size */
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- tc_valid[i] = 0;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = kinfo->rss_size * i;
- }
- ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ ret = hclge_set_rss_tc_mode_cfg(handle);
if (ret)
return ret;
@@ -12508,7 +12094,7 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* initialize the last command BD */
@@ -12552,7 +12138,7 @@ static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(desc, cmd, true);
for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
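Both the BD-number query and the DFX register reads chain multiple descriptors: every descriptor except the last carries HCLGE_COMM_CMD_FLAG_NEXT so firmware treats the chain as one command. The flag placement in isolation (the flag value is illustrative, not the hardware encoding):

    #include <stdio.h>
    #include <stdint.h>

    #define CMD_FLAG_NEXT 0x0004    /* illustrative flag value */

    struct desc {
        uint16_t flag;
    };

    int main(void)
    {
        struct desc desc[4] = { { 0 } };
        int bd_num = 4;
        int i;

        /* every BD but the last links to its successor */
        for (i = 0; i < bd_num - 1; i++)
            desc[i].flag |= CMD_FLAG_NEXT;

        for (i = 0; i < bd_num; i++)
            printf("bd%d flag=0x%04x\n", i, desc[i].flag);
        return 0;
    }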
@@ -12985,7 +12571,7 @@ static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
/* bd0~bd4 need next flag */
if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* setup bd0, this bd contains offset and read length. */
@@ -13095,7 +12681,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.check_port_speed = hclge_check_port_speed,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
- .get_rss_key_size = hclge_get_rss_key_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,