Diffstat
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile           11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c       493
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h       633
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c       343
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c  2833
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h    88
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c   134
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h    15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c      2167
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h        99
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c     7794
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h      416
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c      1088
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c       53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h        6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c       564
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h       143
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c       1079
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h        117
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h      87
20 files changed, 12247 insertions, 5916 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
deleted file mode 100644
index 0fb61d440d3b..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Makefile for the HISILICON network device drivers.
-#
-
-ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-
-obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
-
-hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
deleted file mode 100644
index 7f509eff562e..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ /dev/null
@@ -1,493 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2016-2017 Hisilicon Limited.
-
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/dma-direction.h>
-#include "hclge_cmd.h"
-#include "hnae3.h"
-#include "hclge_main.h"
-
-#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
-static int hclge_ring_space(struct hclge_cmq_ring *ring)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
- int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
-
- if (ntu > ntc)
- return head >= ntc && head <= ntu;
-
- return head >= ntc || head <= ntu;
-}
-
-static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
- &ring->desc_dma_addr, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- if (ring->desc) {
- dma_free_coherent(cmq_ring_to_dev(ring), size,
- ring->desc, ring->desc_dma_addr);
- ring->desc = NULL;
- }
-}
-
-static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
-{
- struct hclge_hw *hw = &hdev->hw;
- struct hclge_cmq_ring *ring =
- (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
- int ret;
-
- ring->ring_type = ring_type;
- ring->dev = hdev;
-
- ret = hclge_alloc_cmd_desc(ring);
- if (ret) {
- dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
- (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
- return ret;
- }
-
- return 0;
-}
-
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
-{
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
-}
-
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read)
-{
- memset((void *)desc, 0, sizeof(struct hclge_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
-
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
-}
-
-static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
-{
- dma_addr_t dma = ring->desc_dma_addr;
- struct hclge_dev *hdev = ring->dev;
- struct hclge_hw *hw = &hdev->hw;
- u32 reg_val;
-
- if (ring->ring_type == HCLGE_TYPE_CSQ) {
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
- reg_val &= HCLGE_NIC_SW_RST_RDY;
- reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- } else {
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
- }
-}
-
-static void hclge_cmd_init_regs(struct hclge_hw *hw)
-{
- hclge_cmd_config_regs(&hw->cmq.csq);
- hclge_cmd_config_regs(&hw->cmq.crq);
-}
-
-static int hclge_cmd_csq_clean(struct hclge_hw *hw)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u32 head;
- int clean;
-
- head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
-
- if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- dev_warn(&hdev->pdev->dev,
- "Disabling any further commands to IMP firmware\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- dev_warn(&hdev->pdev->dev,
- "IMP firmware watchdog reset soon expected!\n");
- return -EIO;
- }
-
- clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
- csq->next_to_clean = head;
- return clean;
-}
-
-static int hclge_cmd_csq_done(struct hclge_hw *hw)
-{
- u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
-}
-
-static bool hclge_is_special_opcode(u16 opcode)
-{
- /* these commands have several descriptors,
- * and use the first one to save opcode and return value
- */
- u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT,
- HCLGE_OPC_STATS_MAC,
- HCLGE_OPC_STATS_MAC_ALL,
- HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT,
- HCLGE_QUERY_CLEAR_PF_RAS_INT,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
- if (spec_opcode[i] == opcode)
- return true;
- }
-
- return false;
-}
-
-static int hclge_cmd_convert_err_code(u16 desc_ret)
-{
- switch (desc_ret) {
- case HCLGE_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGE_CMD_NO_AUTH:
- return -EPERM;
- case HCLGE_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGE_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGE_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGE_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGE_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGE_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGE_CMD_TIMEOUT:
- return -ETIME;
- case HCLGE_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGE_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGE_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
- }
-}
-
-static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
- int num, int ntc)
-{
- u16 opcode, desc_ret;
- int handle;
-
- opcode = le16_to_cpu(desc[0].opcode);
- for (handle = 0; handle < num; handle++) {
- desc[handle] = hw->cmq.csq.desc[ntc];
- ntc++;
- if (ntc >= hw->cmq.csq.desc_num)
- ntc = 0;
- }
- if (likely(!hclge_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[num - 1].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
-
- hw->cmq.last_status = desc_ret;
-
- return hclge_cmd_convert_err_code(desc_ret);
-}
-
-/**
- * hclge_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
- *
- * This is the main send command for command queue, it
- * sends the queue, cleans the queue, etc
- **/
-int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- struct hclge_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int retval = 0;
- int ntc;
-
- spin_lock_bh(&hw->cmq.csq.lock);
-
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- if (num > hclge_ring_space(&hw->cmq.csq)) {
- /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
- * need update the SW HEAD pointer csq->next_to_clean
- */
- csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- /**
- * Record the location of desc in the ring for this time
- * which will be use for hardware to write back
- */
- ntc = hw->cmq.csq.next_to_use;
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
-
- /* Write to hardware */
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
-
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!complete)
- retval = -EBADE;
- else
- retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- retval = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
-
- spin_unlock_bh(&hw->cmq.csq.lock);
-
- return retval;
-}
-
-static enum hclge_cmd_status hclge_cmd_query_firmware_version(
- struct hclge_hw *hw, u32 *version)
-{
- struct hclge_query_version_cmd *resp;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
- resp = (struct hclge_query_version_cmd *)desc.data;
-
- ret = hclge_cmd_send(hw, &desc, 1);
- if (!ret)
- *version = le32_to_cpu(resp->firmware);
-
- return ret;
-}
-
-int hclge_cmd_queue_init(struct hclge_dev *hdev)
-{
- int ret;
-
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
- /* Setup the queue entries for use cmd queue */
- hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
- hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
-
- /* Setup Tx write back timeout */
- hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
-
- /* Setup queue rings */
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CSQ ring setup error %d\n", ret);
- return ret;
- }
-
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CRQ ring setup error %d\n", ret);
- goto err_csq;
- }
-
- return 0;
-err_csq:
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
-}
-
-static int hclge_firmware_compat_config(struct hclge_dev *hdev)
-{
- struct hclge_firmware_compat_cmd *req;
- struct hclge_desc desc;
- u32 compat = 0;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
-
- req = (struct hclge_firmware_compat_cmd *)desc.data;
-
- hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
- req->compat = cpu_to_le32(compat);
-
- return hclge_cmd_send(&hdev->hw, &desc, 1);
-}
-
-int hclge_cmd_init(struct hclge_dev *hdev)
-{
- u32 version;
- int ret;
-
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
-
- hdev->hw.cmq.csq.next_to_clean = 0;
- hdev->hw.cmq.csq.next_to_use = 0;
- hdev->hw.cmq.crq.next_to_clean = 0;
- hdev->hw.cmq.crq.next_to_use = 0;
-
- hclge_cmd_init_regs(&hdev->hw);
-
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- /* Check if there is new reset pending, because the higher level
- * reset may happen when lower level reset is being processed.
- */
- if ((hclge_is_reset_pending(hdev))) {
- ret = -EBUSY;
- goto err_cmd_init;
- }
-
- ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "firmware version query failed %d\n", ret);
- goto err_cmd_init;
- }
- hdev->fw_version = version;
-
- dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
- HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
- HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
- HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
- HNAE3_FW_VERSION_BYTE0_SHIFT));
-
- /* ask the firmware to enable some features, driver can work without
- * it.
- */
- ret = hclge_firmware_compat_config(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Firmware compatible features not enabled(%d).\n",
- ret);
-
- return 0;
-
-err_cmd_init:
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- return ret;
-}
-
-static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
-{
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
-}
-
-void hclge_cmd_uninit(struct hclge_dev *hdev)
-{
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- hclge_cmd_uninit_regs(&hdev->hw);
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- hclge_free_cmd_desc(&hdev->hw.cmq.crq);
-}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96498d9b4754..43cada51d8cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -7,303 +7,28 @@
#include <linux/io.h>
#include <linux/etherdevice.h>
#include "hnae3.h"
-
-#define HCLGE_CMDQ_TX_TIMEOUT 30000
-#define HCLGE_DESC_DATA_LEN 6
+#include "hclge_comm_cmd.h"
struct hclge_dev;
-struct hclge_desc {
- __le16 opcode;
#define HCLGE_CMDQ_RX_INVLD_B 0
#define HCLGE_CMDQ_RX_OUTVLD_B 1
- __le16 flag;
- __le16 retval;
- __le16 rsv;
- __le32 data[HCLGE_DESC_DATA_LEN];
-};
-
-struct hclge_cmq_ring {
- dma_addr_t desc_dma_addr;
- struct hclge_desc *desc;
- struct hclge_dev *dev;
- u32 head;
- u32 tail;
-
- u16 buf_size;
- u16 desc_num;
- int next_to_use;
- int next_to_clean;
- u8 ring_type; /* cmq ring type */
- spinlock_t lock; /* Command queue lock */
-};
-
-enum hclge_cmd_return_status {
- HCLGE_CMD_EXEC_SUCCESS = 0,
- HCLGE_CMD_NO_AUTH = 1,
- HCLGE_CMD_NOT_SUPPORTED = 2,
- HCLGE_CMD_QUEUE_FULL = 3,
- HCLGE_CMD_NEXT_ERR = 4,
- HCLGE_CMD_UNEXE_ERR = 5,
- HCLGE_CMD_PARA_ERR = 6,
- HCLGE_CMD_RESULT_ERR = 7,
- HCLGE_CMD_TIMEOUT = 8,
- HCLGE_CMD_HILINK_ERR = 9,
- HCLGE_CMD_QUEUE_ILLEGAL = 10,
- HCLGE_CMD_INVALID = 11,
-};
-
-enum hclge_cmd_status {
- HCLGE_STATUS_SUCCESS = 0,
- HCLGE_ERR_CSQ_FULL = -1,
- HCLGE_ERR_CSQ_TIMEOUT = -2,
- HCLGE_ERR_CSQ_ERROR = -3,
-};
-
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
char name[HNAE3_INT_NAME_LEN];
};
-struct hclge_cmq {
- struct hclge_cmq_ring csq;
- struct hclge_cmq_ring crq;
- u16 tx_timeout;
- enum hclge_cmd_status last_status;
-};
-
-#define HCLGE_CMD_FLAG_IN BIT(0)
-#define HCLGE_CMD_FLAG_OUT BIT(1)
-#define HCLGE_CMD_FLAG_NEXT BIT(2)
-#define HCLGE_CMD_FLAG_WR BIT(3)
-#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
-#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
-
-enum hclge_opcode_type {
- /* Generic commands */
- HCLGE_OPC_QUERY_FW_VER = 0x0001,
- HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
- HCLGE_OPC_GBL_RST_STATUS = 0x0021,
- HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
- HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
- HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
- HCLGE_OPC_GET_CFG_PARAM = 0x0025,
- HCLGE_OPC_PF_RST_DONE = 0x0026,
- HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
-
- HCLGE_OPC_STATS_64_BIT = 0x0030,
- HCLGE_OPC_STATS_32_BIT = 0x0031,
- HCLGE_OPC_STATS_MAC = 0x0032,
- HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
- HCLGE_OPC_STATS_MAC_ALL = 0x0034,
-
- HCLGE_OPC_QUERY_REG_NUM = 0x0040,
- HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
- HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
- HCLGE_OPC_DFX_BD_NUM = 0x0043,
- HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
- HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
- HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
- HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
- HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
- HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
- HCLGE_OPC_DFX_NCSI_REG = 0x004A,
- HCLGE_OPC_DFX_RTC_REG = 0x004B,
- HCLGE_OPC_DFX_PPP_REG = 0x004C,
- HCLGE_OPC_DFX_RCB_REG = 0x004D,
- HCLGE_OPC_DFX_TQP_REG = 0x004E,
- HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
- HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050,
-
- /* MAC command */
- HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
- HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
- HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
- HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
- HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
- HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
- HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
- HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
- HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
-
- /* PFC/Pause commands */
- HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
- HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
- HCLGE_OPC_CFG_MAC_PARA = 0x0703,
- HCLGE_OPC_CFG_PFC_PARA = 0x0704,
- HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
- HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
- HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
- HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
- HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
- HCLGE_OPC_QOS_MAP = 0x070A,
-
- /* ETS/scheduler commands */
- HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
- HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
- HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
- HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
- HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
- HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
- HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
- HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
- HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
- HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
- HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
- HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
- HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
- HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
- HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
- HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
- HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
- HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
- HCLGE_OPC_QSET_DFX_STS = 0x0844,
- HCLGE_OPC_PRI_DFX_STS = 0x0845,
- HCLGE_OPC_PG_DFX_STS = 0x0846,
- HCLGE_OPC_PORT_DFX_STS = 0x0847,
- HCLGE_OPC_SCH_NQ_CNT = 0x0848,
- HCLGE_OPC_SCH_RQ_CNT = 0x0849,
- HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
- HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
- HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
-
- /* Packet buffer allocate commands */
- HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
- HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
- HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
- HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
- HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
- HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
-
- /* TQP management command */
- HCLGE_OPC_SET_TQP_MAP = 0x0A01,
-
- /* TQP commands */
- HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
- HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
- HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
- HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
- HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
- HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
- HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
- HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
-
- /* PPU commands */
- HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
-
- /* TSO command */
- HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
- HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
-
- /* RSS commands */
- HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
- HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
- HCLGE_OPC_RSS_TC_MODE = 0x0D08,
- HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
-
- /* Promisuous mode command */
- HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
-
- /* Vlan offload commands */
- HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
- HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
-
- /* Interrupts commands */
- HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
- HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
-
- /* MAC commands */
- HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
- HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
- HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
- HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
- HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
- HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
- HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
-
- /* MAC VLAN commands */
- HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
-
- /* VLAN commands */
- HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
- HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
- HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
-
- /* Flow Director commands */
- HCLGE_OPC_FD_MODE_CTRL = 0x1200,
- HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
- HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
- HCLGE_OPC_FD_TCAM_OP = 0x1203,
- HCLGE_OPC_FD_AD_OP = 0x1204,
-
- /* MDIO command */
- HCLGE_OPC_MDIO_CONFIG = 0x1900,
-
- /* QCN commands */
- HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
- HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
- HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
- HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
- HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
- HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
- HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
- HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
-
- /* Mailbox command */
- HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
-
- /* Led command */
- HCLGE_OPC_LED_STATUS_CFG = 0xB000,
-
- /* NCL config command */
- HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
-
- /* M7 stats command */
- HCLGE_OPC_M7_STATS_BD = 0x7012,
- HCLGE_OPC_M7_STATS_INFO = 0x7013,
- HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
-
- /* SFP command */
- HCLGE_OPC_GET_SFP_INFO = 0x7104,
-
- /* Error INT commands */
- HCLGE_MAC_COMMON_INT_EN = 0x030E,
- HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
- HCLGE_SSU_ECC_INT_CMD = 0x0989,
- HCLGE_SSU_COMMON_INT_CMD = 0x098C,
- HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
- HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
- HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
- HCLGE_COMMON_ECC_INT_CFG = 0x1505,
- HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
- HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
- HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
- HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
- HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
- HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
- HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
- HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
- HCLGE_IGU_COMMON_INT_EN = 0x1806,
- HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
- HCLGE_PPP_CMD0_INT_CMD = 0x2100,
- HCLGE_PPP_CMD1_INT_CMD = 0x2101,
- HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
- HCLGE_NCSI_INT_EN = 0x2401,
-};
+#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
+#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024
+#define HCLGE_TQP_EXT_REG_OFFSET 0x100
+
#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10
#define HCLGE_RCB_INIT_FLAG_EN_B 0
#define HCLGE_RCB_INIT_FLAG_FINI_B 8
@@ -333,7 +58,9 @@ enum hclge_int_type {
};
struct hclge_ctrl_vector_chain_cmd {
- u8 int_vector_id;
+#define HCLGE_VECTOR_ID_L_S 0
+#define HCLGE_VECTOR_ID_L_M GENMASK(7, 0)
+ u8 int_vector_id_l;
u8 int_cause_num;
#define HCLGE_INT_TYPE_S 0
#define HCLGE_INT_TYPE_M GENMASK(1, 0)
@@ -343,7 +70,9 @@ struct hclge_ctrl_vector_chain_cmd {
#define HCLGE_INT_GL_IDX_M GENMASK(14, 13)
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
u8 vfid;
- u8 rsv;
+#define HCLGE_VECTOR_ID_H_S 8
+#define HCLGE_VECTOR_ID_H_M GENMASK(15, 8)
+ u8 int_vector_id_h;
};
#define HCLGE_MAX_TC_NUM 8
@@ -360,11 +89,6 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
-struct hclge_query_version_cmd {
- __le32 firmware;
- __le32 firmware_rsv[5];
-};
-
#define HCLGE_RX_PRIV_EN_B 15
#define HCLGE_TC_NUM_ONE_DESC 4
struct hclge_priv_wl {
@@ -395,7 +119,7 @@ struct hclge_tc_thrd {
};
struct hclge_priv_buf {
- struct hclge_waterline wl; /* Waterline for low and high*/
+ struct hclge_waterline wl; /* Waterline for low and high */
u32 buf_size; /* TC private buffer size */
u32 tx_buf_size;
u32 enable; /* Enable TC private buffer or not */
@@ -450,16 +174,13 @@ struct hclge_pf_res_cmd {
__le16 tqp_num;
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
- __le16 msixcap_localid_ba_rocee;
-#define HCLGE_MSIX_OFT_ROCEE_S 0
-#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0)
-#define HCLGE_PF_VEC_NUM_S 0
-#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
- __le16 pf_intr_vector_number;
+ __le16 msixcap_localid_number_nic;
+ __le16 pf_intr_vector_number_roce;
__le16 pf_own_fun_number;
__le16 tx_buf_size;
__le16 dv_buf_size;
- __le32 rsv[2];
+ __le16 ext_tqp_num;
+ u8 rsv[6];
};
#define HCLGE_CFG_OFFSET_S 0
@@ -469,8 +190,6 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RD_LEN_BYTES 16
#define HCLGE_CFG_RD_LEN_UNIT 4
-#define HCLGE_CFG_VMDQ_S 0
-#define HCLGE_CFG_VMDQ_M GENMASK(7, 0)
#define HCLGE_CFG_TC_NUM_S 8
#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8)
#define HCLGE_CFG_TQP_DESC_N_S 16
@@ -489,8 +208,16 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
+#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
+#define HCLGE_CFG_VLAN_FLTR_CAP_S 8
+#define HCLGE_CFG_VLAN_FLTR_CAP_M GENMASK(9, 8)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+#define HCLGE_CFG_PF_RSS_SIZE_S 0
+#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0)
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_S 4
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_M GENMASK(15, 4)
#define HCLGE_CFG_CMD_CNT 4
@@ -510,44 +237,17 @@ struct hclge_vf_num_cmd {
};
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
-#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
-#define HCLGE_RSS_HASH_KEY_NUM 16
-struct hclge_rss_config_cmd {
- u8 hash_config;
- u8 rsv[7];
- u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
-};
-
-struct hclge_rss_input_tuple_cmd {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
- u8 rsv[16];
-};
-#define HCLGE_RSS_CFG_TBL_SIZE 16
-
-struct hclge_rss_indirection_table_cmd {
- __le16 start_table_index;
- __le16 rss_set_bitmap;
- u8 rsv[4];
- u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE];
-};
+#define HCLGE_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_RSS_CFG_TBL_BW_L 8U
#define HCLGE_RSS_TC_OFFSET_S 0
-#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0)
+#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_RSS_TC_SIZE_MSB_B 11
#define HCLGE_RSS_TC_SIZE_S 12
#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
#define HCLGE_RSS_TC_VALID_B 15
-struct hclge_rss_tc_mode_cmd {
- __le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
- u8 rsv[8];
-};
#define HCLGE_LINK_STATUS_UP_B 0
#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
@@ -556,23 +256,26 @@ struct hclge_link_status_cmd {
u8 rsv[23];
};
-struct hclge_promisc_param {
- u8 vf_id;
- u8 enable;
-};
+/* for DEVICE_VERSION_V1/2, reference to promisc cmd byte8 */
+#define HCLGE_PROMISC_EN_UC 1
+#define HCLGE_PROMISC_EN_MC 2
+#define HCLGE_PROMISC_EN_BC 3
+#define HCLGE_PROMISC_TX_EN 4
+#define HCLGE_PROMISC_RX_EN 5
+
+/* for DEVICE_VERSION_V3, reference to promisc cmd byte10 */
+#define HCLGE_PROMISC_UC_RX_EN 2
+#define HCLGE_PROMISC_MC_RX_EN 3
+#define HCLGE_PROMISC_BC_RX_EN 4
+#define HCLGE_PROMISC_UC_TX_EN 5
+#define HCLGE_PROMISC_MC_TX_EN 6
+#define HCLGE_PROMISC_BC_TX_EN 7
-#define HCLGE_PROMISC_TX_EN_B BIT(4)
-#define HCLGE_PROMISC_RX_EN_B BIT(5)
-#define HCLGE_PROMISC_EN_B 1
-#define HCLGE_PROMISC_EN_ALL 0x7
-#define HCLGE_PROMISC_EN_UC 0x1
-#define HCLGE_PROMISC_EN_MC 0x2
-#define HCLGE_PROMISC_EN_BC 0x4
struct hclge_promisc_cfg_cmd {
- u8 flag;
+ u8 promisc;
u8 vf_id;
- __le16 rsv0;
- u8 rsv1[20];
+ u8 extend_promisc;
+ u8 rsv0[21];
};
enum hclge_promisc_type {
@@ -618,10 +321,11 @@ struct hclge_config_mac_speed_dup_cmd {
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
u8 mac_change_fec_en;
- u8 rsv[22];
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
};
-#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
#define HCLGE_MAC_CFG_AN_EN_B 0
@@ -645,7 +349,9 @@ struct hclge_sfp_info_cmd {
u8 autoneg_ability; /* whether support autoneg */
__le32 speed_ability; /* speed ability for current media */
__le32 module_type;
- u8 rsv[8];
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
@@ -657,12 +363,27 @@ struct hclge_sfp_info_cmd {
#define HCLGE_MAC_FEC_OFF 0
#define HCLGE_MAC_FEC_BASER 1
#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
struct hclge_config_fec_cmd {
u8 fec_mode;
u8 default_config;
u8 rsv[22];
};
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -733,31 +454,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-struct hclge_mac_vlan_add_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
-#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
-struct hclge_mac_vlan_remove_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
@@ -789,6 +485,14 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
};
+#define HCLGE_INGRESS_BYPASS_B 0
+struct hclge_port_vlan_filter_bypass_cmd {
+ u8 bypass_state;
+ u8 rsv1[3];
+ u8 vf_id;
+ u8 rsv2[19];
+};
+
#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
#define HCLGE_SWITCH_ALW_LPBK_B 1U
#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
@@ -821,6 +525,7 @@ enum hclge_mac_vlan_cfg_sel {
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6
+#define HCLGE_TAG_SHIFT_MODE_EN_B 7
#define HCLGE_VF_NUM_PER_BYTE 8
struct hclge_vport_vtag_tx_cfg_cmd {
@@ -837,6 +542,8 @@ struct hclge_vport_vtag_tx_cfg_cmd {
#define HCLGE_REM_TAG2_EN_B 1
#define HCLGE_SHOW_TAG1_EN_B 2
#define HCLGE_SHOW_TAG2_EN_B 3
+#define HCLGE_DISCARD_TAG1_EN_B 5
+#define HCLGE_DISCARD_TAG2_EN_B 6
struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
@@ -907,8 +614,8 @@ struct hclge_cfg_tso_status_cmd {
#define HCLGE_GRO_EN_B 0
struct hclge_cfg_gro_status_cmd {
- __le16 gro_en;
- u8 rsv[22];
+ u8 gro_en;
+ u8 rsv[23];
};
#define HCLGE_TSO_MSS_MIN 256
@@ -924,10 +631,16 @@ struct hclge_reset_tqp_queue_cmd {
#define HCLGE_CFG_RESET_MAC_B 3
#define HCLGE_CFG_RESET_FUNC_B 7
+#define HCLGE_CFG_RESET_RCB_B 1
struct hclge_reset_cmd {
u8 mac_func_reset;
u8 fun_reset_vfid;
- u8 rsv[22];
+ u8 fun_reset_rcb;
+ u8 rsv;
+ __le16 fun_reset_rcb_vqid_start;
+ __le16 fun_reset_rcb_vqid_num;
+ u8 fun_reset_rcb_return_status;
+ u8 rsv1[15];
};
#define HCLGE_PF_RESET_DONE_BIT BIT(0)
@@ -939,9 +652,10 @@ struct hclge_pf_rst_done_cmd {
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
-#define HCLGE_CMD_SERDES_DONE_B BIT(0)
-#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
-struct hclge_serdes_lb_cmd {
+#define HCLGE_CMD_GE_PHY_INNER_LOOP_B BIT(3)
+#define HCLGE_CMD_COMMON_LB_DONE_B BIT(0)
+#define HCLGE_CMD_COMMON_LB_SUCCESS_B BIT(1)
+struct hclge_common_lb_cmd {
u8 mask;
u8 enable;
u8 result;
@@ -954,26 +668,6 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
-#define HCLGE_TYPE_CRQ 0
-#define HCLGE_TYPE_CSQ 1
-#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c
-#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-
-/* this bit indicates that the driver is ready for hardware reset */
-#define HCLGE_NIC_SW_RST_RDY_B 16
-#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
-
-#define HCLGE_NIC_CMQ_DESC_NUM 1024
-#define HCLGE_NIC_CMQ_DESC_NUM_S 3
-
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
@@ -1034,16 +728,19 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_DROP_B 0
#define HCLGE_FD_AD_DIRECT_QID_B 1
#define HCLGE_FD_AD_QID_S 2
-#define HCLGE_FD_AD_QID_M GENMASK(12, 2)
+#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
#define HCLGE_FD_AD_COUNTER_NUM_S 13
#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
-#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21)
+#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
#define HCLGE_FD_AD_WR_RULE_ID_B 0
#define HCLGE_FD_AD_RULE_ID_S 1
-#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1)
+#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1)
+#define HCLGE_FD_AD_TC_OVRD_B 16
+#define HCLGE_FD_AD_TC_SIZE_S 17
+#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17)
struct hclge_fd_ad_config_cmd {
u8 stage;
@@ -1053,7 +750,28 @@ struct hclge_fd_ad_config_cmd {
u8 rsv2[8];
};
-struct hclge_get_m7_bd_cmd {
+struct hclge_fd_ad_cnt_read_cmd {
+ u8 rsv0[4];
+ __le16 index;
+ u8 rsv1[2];
+ __le64 cnt;
+ u8 rsv2[8];
+};
+
+#define HCLGE_FD_USER_DEF_OFT_S 0
+#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0)
+#define HCLGE_FD_USER_DEF_EN_B 15
+struct hclge_fd_user_def_cfg_cmd {
+ __le16 ol2_cfg;
+ __le16 l2_cfg;
+ __le16 ol3_cfg;
+ __le16 l3_cfg;
+ __le16 ol4_cfg;
+ __le16 l4_cfg;
+ u8 rsv[12];
+};
+
+struct hclge_get_imp_bd_cmd {
__le32 bd_num;
u8 rsv[20];
};
@@ -1072,45 +790,92 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
-#define HCLGE_LINK_EVENT_REPORT_EN_B 0
-#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
-struct hclge_firmware_compat_cmd {
- __le32 compat;
- u8 rsv[20];
+#define HCLGE_SFP_INFO_CMD_NUM 6
+#define HCLGE_SFP_INFO_BD0_LEN 20
+#define HCLGE_SFP_INFO_BDX_LEN 24
+#define HCLGE_SFP_INFO_MAX_LEN \
+ (HCLGE_SFP_INFO_BD0_LEN + \
+ (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN)
+
+struct hclge_sfp_info_bd0_cmd {
+ __le16 offset;
+ __le16 read_len;
+ u8 data[HCLGE_SFP_INFO_BD0_LEN];
};
-int hclge_cmd_init(struct hclge_dev *hdev);
-static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
-{
- writel(value, base + reg);
-}
+#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4
-#define hclge_write_dev(a, reg, value) \
- hclge_write_reg((a)->io_base, (reg), (value))
-#define hclge_read_dev(a, reg) \
- hclge_read_reg((a)->io_base, (reg))
+struct hclge_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1;
+ __le32 max_tm_rate;
+};
-static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
-{
- u8 __iomem *reg_addr = READ_ONCE(base);
+#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
- return readl(reg_addr + reg);
-}
+struct hclge_dev_specs_1_cmd {
+ __le16 max_frm_size;
+ __le16 max_qset_num;
+ __le16 max_int_gl;
+ u8 rsv0[2];
+ __le16 umv_size;
+ __le16 mc_mac_size;
+ u8 rsv1[12];
+};
-#define HCLGE_SEND_SYNC(flag) \
- ((flag) & HCLGE_CMD_FLAG_NO_INTR)
+/* mac speed type defined in firmware command */
+enum HCLGE_FIRMWARE_MAC_SPEED {
+ HCLGE_FW_MAC_SPEED_1G,
+ HCLGE_FW_MAC_SPEED_10G,
+ HCLGE_FW_MAC_SPEED_25G,
+ HCLGE_FW_MAC_SPEED_40G,
+ HCLGE_FW_MAC_SPEED_50G,
+ HCLGE_FW_MAC_SPEED_100G,
+ HCLGE_FW_MAC_SPEED_10M,
+ HCLGE_FW_MAC_SPEED_100M,
+ HCLGE_FW_MAC_SPEED_200G,
+};
-struct hclge_hw;
-int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read);
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
+#define HCLGE_PHY_LINK_SETTING_BD_NUM 2
-enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
- struct hclge_desc *desc);
-enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
- struct hclge_desc *desc);
+struct hclge_phy_link_ksetting_0_cmd {
+ __le32 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 eth_tp_mdix;
+ u8 eth_tp_mdix_ctrl;
+ u8 port;
+ u8 transceiver;
+ u8 phy_address;
+ u8 rsv;
+ __le32 supported;
+ __le32 advertising;
+ __le32 lp_advertising;
+};
+
+struct hclge_phy_link_ksetting_1_cmd {
+ u8 master_slave_cfg;
+ u8 master_slave_state;
+ u8 rsv[22];
+};
-void hclge_cmd_uninit(struct hclge_dev *hdev);
-int hclge_cmd_queue_init(struct hclge_dev *hdev);
+struct hclge_phy_reg_cmd {
+ __le16 reg_addr;
+ u8 rsv0[2];
+ __le16 reg_val;
+ u8 rsv1[18];
+};
+
+struct hclge_hw;
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
+enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
+ struct hclge_desc *desc);
+enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
+ struct hclge_desc *desc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index d6c3952aba04..c4aded65e848 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
+#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"
@@ -103,28 +104,32 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
return 0;
}
-static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
- u8 *tc, bool *changed)
+static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
+ bool *changed)
{
- bool has_ets_tc = false;
- u32 total_ets_bw = 0;
- u8 max_tc = 0;
- int ret;
+ u8 max_tc_id = 0;
u8 i;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
*changed = true;
- if (ets->prio_tc[i] > max_tc)
- max_tc = ets->prio_tc[i];
+ if (ets->prio_tc[i] > max_tc_id)
+ max_tc_id = ets->prio_tc[i];
}
- ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
- if (ret)
- return ret;
+ /* return max tc number, max tc id need to plus 1 */
+ return max_tc_id + 1;
+}
+
+static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
+ struct ieee_ets *ets, bool *changed)
+{
+ bool has_ets_tc = false;
+ u32 total_ets_bw = 0;
+ u8 i;
- for (i = 0; i < hdev->tc_max; i++) {
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -132,6 +137,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
+ /* The hardware will switch to sp mode if bandwidth is
+ * 0, so limit ets bandwidth must be greater than 0.
+ */
+ if (!ets->tc_tx_bw[i]) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u ets bw cannot be 0\n", i);
+ return -EINVAL;
+ }
+
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;
@@ -147,7 +161,26 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (has_ets_tc && total_ets_bw != BW_PERCENT)
return -EINVAL;
- *tc = max_tc + 1;
+ return 0;
+}
+
+static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
+ u8 *tc, bool *changed)
+{
+ u8 tc_num;
+ int ret;
+
+ tc_num = hclge_ets_tc_changed(hdev, ets, changed);
+
+ ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
+ if (ret)
+ return ret;
+
+ ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
+ if (ret)
+ return ret;
+
+ *tc = tc_num;
if (*tc != hdev->tm_info.num_tc)
*changed = true;
@@ -170,34 +203,11 @@ static int hclge_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
- hclge_rss_indir_init_cfg(hdev);
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
return hclge_rss_init_hw(hdev);
}
-static int hclge_client_setup_tc(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = hdev->vport;
- struct hnae3_client *client;
- struct hnae3_handle *handle;
- int ret;
- u32 i;
-
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- handle = &vport[i].nic;
- client = handle->client;
-
- if (!client || !client->ops || !client->ops->setup_tc)
- continue;
-
- ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
int ret;
@@ -246,6 +256,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
}
hclge_tm_schd_info_update(hdev, num_tc);
+ if (num_tc > 1)
+ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -256,13 +270,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_client_setup_tc(hdev);
- if (ret)
- goto err_out;
-
- ret = hclge_notify_init_up(hdev);
- if (ret)
- return ret;
+ return hclge_notify_init_up(hdev);
}
return hclge_tm_dwrr_cfg(hdev);
@@ -278,37 +286,24 @@ err_out:
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
- u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;
- u8 i, j, pfc_map, *prio_tc;
int ret;
memset(pfc, 0, sizeof(*pfc));
pfc->pfc_cap = hdev->pfc_max;
- prio_tc = hdev->tm_info.prio_tc;
- pfc_map = hdev->tm_info.hw_pfc_map;
-
- /* Pfc setting is based on TC */
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
- for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
- if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
- pfc->pfc_en |= BIT(j);
- }
- }
+ pfc->pfc_en = hdev->tm_info.pfc_en;
- ret = hclge_pfc_tx_stats_get(hdev, requests);
- if (ret)
+ ret = hclge_mac_update_stats(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to update MAC stats, ret = %d.\n", ret);
return ret;
+ }
- ret = hclge_pfc_rx_stats_get(hdev, indications);
- if (ret)
- return ret;
+ hclge_pfc_tx_stats_get(hdev, pfc->requests);
+ hclge_pfc_rx_stats_get(hdev, pfc->indications);
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- pfc->requests[i] = requests[i];
- pfc->indications[i] = indications[i];
- }
return 0;
}
@@ -320,8 +315,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
u8 i, j, pfc_map, *prio_tc;
int ret;
- if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (pfc->pfc_en == hdev->tm_info.pfc_en)
@@ -365,6 +359,93 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
+
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
@@ -396,32 +477,125 @@ static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
return 0;
}
+static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u16 queue_sum = 0;
+ int ret;
+ int i;
+
+ if (!mqprio_qopt->qopt.num_tc) {
+ mqprio_qopt->qopt.num_tc = 1;
+ return 0;
+ }
+
+ ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
+ mqprio_qopt->qopt.prio_tc_map);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
+ if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count must be power of 2\n");
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count should be no more than %u\n",
+ hdev->pf_rss_size_max);
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->qopt.offset[i] != queue_sum) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue offset must start from 0, and being continuous\n");
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
+ dev_err(&hdev->pdev->dev,
+ "qopt tx_rate is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ queue_sum = mqprio_qopt->qopt.offset[i];
+ queue_sum += mqprio_qopt->qopt.count[i];
+ }
+ if (hdev->vport[0].alloc_tqps < queue_sum) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count sum should be less than %u\n",
+ hdev->vport[0].alloc_tqps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ memset(tc_info, 0, sizeof(*tc_info));
+ tc_info->num_tc = mqprio_qopt->qopt.num_tc;
+ memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
+ sizeof_field(struct hnae3_tc_info, prio_tc));
+ memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
+ sizeof_field(struct hnae3_tc_info, tqp_count));
+ memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
+ sizeof_field(struct hnae3_tc_info, tqp_offset));
+}
+
+static int hclge_config_tc(struct hclge_dev *hdev,
+ struct hnae3_tc_info *tc_info)
+{
+ int i;
+
+ hclge_tm_schd_info_update(hdev, tc_info->num_tc);
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];
+
+ return hclge_map_update(hdev);
+}
+
/* Set up TC for hardware offloaded mqprio in channel mode */
-static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
+static int hclge_setup_tc(struct hnae3_handle *h,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
+ struct hnae3_tc_info old_tc_info;
+ u8 tc = mqprio_qopt->qopt.num_tc;
int ret;
+ /* if client unregistered, it's not allowed to change
+ * mqprio configuration, which may cause uninit ring
+ * fail.
+ */
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return -EBUSY;
+
if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
return -EINVAL;
- ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
- if (ret)
- return -EINVAL;
+ ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check mqprio qopt params, ret = %d\n", ret);
+ return ret;
+ }
ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
- hclge_tm_schd_info_update(hdev, tc);
- hclge_tm_prio_tc_info_update(hdev, prio_tc);
-
- ret = hclge_tm_init_hw(hdev, false);
- if (ret)
- goto err_out;
+ kinfo = &vport->nic.kinfo;
+ memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
+ hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
+ kinfo->tc_info.mqprio_active = tc > 0;
- ret = hclge_client_setup_tc(hdev);
+ ret = hclge_config_tc(hdev, &kinfo->tc_info);
if (ret)
goto err_out;
@@ -435,6 +609,17 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
return hclge_notify_init_up(hdev);
err_out:
+ if (!tc) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to destroy mqprio, will active after reset, ret = %d\n",
+ ret);
+ } else {
+ /* roll-back */
+ memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+ if (hclge_config_tc(hdev, &kinfo->tc_info))
+ dev_err(&hdev->pdev->dev,
+ "failed to roll back tc configuration\n");
+ }
hclge_notify_init_up(hdev);
return ret;
@@ -445,6 +630,8 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setets = hclge_ieee_setets,
.ieee_getpfc = hclge_ieee_getpfc,
.ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
.setup_tc = hclge_setup_tc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 67fad80035d3..142415c84c6b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -4,74 +4,116 @@
#include <linux/device.h>
#include "hclge_debugfs.h"
+#include "hclge_err.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"
-static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
- { .reg_type = "bios common",
+static const char * const state_str[] = { "off", "on" };
+static const char * const hclge_mac_state_str[] = {
+ "TO_ADD", "TO_DEL", "ACTIVE"
+};
+
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
+static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+ { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
.offset = HCLGE_DBG_DFX_BIOS_OFFSET,
.cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
- { .reg_type = "ssu",
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
.dfx_msg = &hclge_dbg_ssu_reg_0[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
.offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
.cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
- { .reg_type = "ssu",
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
.dfx_msg = &hclge_dbg_ssu_reg_1[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
.offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
.cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
- { .reg_type = "ssu",
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
.dfx_msg = &hclge_dbg_ssu_reg_2[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
.offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
.cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
- { .reg_type = "igu egu",
+ { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
.dfx_msg = &hclge_dbg_igu_egu_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
.offset = HCLGE_DBG_DFX_IGU_OFFSET,
.cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
- { .reg_type = "rpu",
+ { .cmd = HNAE3_DBG_CMD_REG_RPU,
.dfx_msg = &hclge_dbg_rpu_reg_0[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
.offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
.cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
- { .reg_type = "rpu",
+ { .cmd = HNAE3_DBG_CMD_REG_RPU,
.dfx_msg = &hclge_dbg_rpu_reg_1[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
.offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
.cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
- { .reg_type = "ncsi",
+ { .cmd = HNAE3_DBG_CMD_REG_NCSI,
.dfx_msg = &hclge_dbg_ncsi_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
.offset = HCLGE_DBG_DFX_NCSI_OFFSET,
.cmd = HCLGE_OPC_DFX_NCSI_REG } },
- { .reg_type = "rtc",
+ { .cmd = HNAE3_DBG_CMD_REG_RTC,
.dfx_msg = &hclge_dbg_rtc_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
.offset = HCLGE_DBG_DFX_RTC_OFFSET,
.cmd = HCLGE_OPC_DFX_RTC_REG } },
- { .reg_type = "ppp",
+ { .cmd = HNAE3_DBG_CMD_REG_PPP,
.dfx_msg = &hclge_dbg_ppp_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
.offset = HCLGE_DBG_DFX_PPP_OFFSET,
.cmd = HCLGE_OPC_DFX_PPP_REG } },
- { .reg_type = "rcb",
+ { .cmd = HNAE3_DBG_CMD_REG_RCB,
.dfx_msg = &hclge_dbg_rcb_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
.offset = HCLGE_DBG_DFX_RCB_OFFSET,
.cmd = HCLGE_OPC_DFX_RCB_REG } },
- { .reg_type = "tqp",
+ { .cmd = HNAE3_DBG_CMD_REG_TQP,
.dfx_msg = &hclge_dbg_tqp_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
.offset = HCLGE_DBG_DFX_TQP_OFFSET,
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
-static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
+/* make sure: len(name) + interval >= maxlen(item data) + 2,
+ * for example, name = "pkt_num"(len: 7), the prototype of item data is u32,
+ * and print as "%u"(maxlen: 10), so the interval should be at least 5.
+ */
+static void hclge_dbg_fill_content(char *content, u16 len,
+ const struct hclge_dbg_item *items,
+ const char **result, u16 size)
+{
+ char *pos = content;
+ u16 i;
+
+ memset(content, ' ', len);
+ for (i = 0; i < size; i++) {
+ if (result)
+ strncpy(pos, result[i], strlen(result[i]));
+ else
+ strncpy(pos, items[i].name, strlen(items[i].name));
+ pos += strlen(items[i].name) + items[i].interval;
+ }
+ *pos++ = '\n';
+ *pos++ = '\0';
+}
+
+static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
+{
+ if (id)
+ sprintf(buf, "vf%u", id - 1U);
+ else
+ sprintf(buf, "pf");
+
+ return buf;
+}
+
+static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
+ u32 *bd_num)
{
struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int entries_per_desc;
@@ -81,13 +123,21 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
ret = hclge_query_bd_num_cmd_send(hdev, desc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "get dfx bdnum fail, ret = %d\n", ret);
+ "failed to get dfx bd_num, offset = %d, ret = %d\n",
+ offset, ret);
return ret;
}
entries_per_desc = ARRAY_SIZE(desc[0].data);
index = offset % entries_per_desc;
- return (int)desc[offset / entries_per_desc].data[index];
+
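+ /* the BD counts are spread over several descriptors; pick the data word for this offset */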
+ *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
+ if (!(*bd_num)) {
+ dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -102,7 +152,7 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
desc->data[0] = cpu_to_le32(index);
for (i = 1; i < bd_num; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -114,728 +164,1283 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
return ret;
}
-static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- struct hclge_dbg_reg_type_info *reg_info,
- const char *cmd_buf)
+static int
+hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
+ const struct hclge_dbg_reg_type_info *reg_info,
+ char *buf, int len, int *pos)
{
-#define IDX_OFFSET 1
-
- const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
- struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
- struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
+ u32 index, entry, i, cnt;
+ int bd_num, min_num, ret;
struct hclge_desc *desc;
- int entries_per_desc;
- int bd_num, buf_len;
- int index = 0;
- int min_num;
- int ret, i;
- if (*s) {
- ret = kstrtouint(s, 0, &index);
- index = (ret != 0) ? 0 : index;
- }
+ ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
+ if (ret)
+ return ret;
- bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
- if (bd_num <= 0) {
- dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
- reg_msg->offset, bd_num);
- return;
- }
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
+ min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
+
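+ /* print a legend mapping item index to field name, then a header row of item indexes */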
+ for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
+ *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
+ cnt++, dfx_message->message);
+
+ for (i = 0; i < cnt; i++)
+ *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
+
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+
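+ /* one row of register values per TQP */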
+ for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
+ dfx_message = reg_info->dfx_msg;
+ desc = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
+ reg_msg->cmd);
+ if (ret)
+ break;
+
+ for (i = 0; i < min_num; i++, dfx_message++) {
+ entry = i % HCLGE_DESC_DATA_LEN;
+ if (i > 0 && !entry)
+ desc++;
- buf_len = sizeof(struct hclge_desc) * bd_num;
- desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
- return;
+ *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
+ le32_to_cpu(desc->data[entry]));
+ }
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
}
+ kfree(desc_src);
+ return ret;
+}
+
+static int
+hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
+ const struct hclge_dbg_reg_type_info *reg_info,
+ char *buf, int len, int *pos)
+{
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ struct hclge_desc *desc_src;
+ int bd_num, min_num, ret;
+ struct hclge_desc *desc;
+ u32 entry, i;
+
+ ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
+ if (ret)
+ return ret;
+
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
if (ret) {
- kfree(desc_src);
- return;
+ kfree(desc);
+ return ret;
}
- entries_per_desc = ARRAY_SIZE(desc->data);
- min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);
+ min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
- desc = desc_src;
- for (i = 0; i < min_num; i++) {
- if (i > 0 && (i % entries_per_desc) == 0)
+ for (i = 0; i < min_num; i++, dfx_message++) {
+ entry = i % HCLGE_DESC_DATA_LEN;
+ if (i > 0 && !entry)
desc++;
- if (dfx_message->flag)
- dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
- dfx_message->message,
- le32_to_cpu(desc->data[i % entries_per_desc]));
+ if (!dfx_message->flag)
+ continue;
- dfx_message++;
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
}
kfree(desc_src);
+ return 0;
}
-static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
+static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
+ {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
+ {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
+ {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
+ {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
+ {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
+ {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
+ {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
+ {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
+ {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
+ {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
+ {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
+ {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
+ {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
+ {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
+};
+
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
{
- struct device *dev = &hdev->pdev->dev;
- struct hclge_dbg_bitmap_cmd *bitmap;
- int rq_id, pri_id, qset_id;
- int port_id, nq_id, pg_id;
- struct hclge_desc desc[2];
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en, i, offset;
+ int ret;
- int cnt, ret;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
- cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
- &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
- if (cnt != 6) {
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
dev_err(&hdev->pdev->dev,
- "dump dcb: bad command parameter, cnt=%d\n", cnt);
- return;
+ "failed to dump mac enable status, ret = %d\n", ret);
+ return ret;
}
- ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
- HCLGE_OPC_QSET_DFX_STS);
- if (ret)
- return;
+ req = (struct hclge_config_mac_mode_cmd *)desc.data;
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
- dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
- dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
+ offset = hclge_dbg_mac_en_status[i].offset;
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
+ }
- ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
- if (ret)
- return;
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+ int ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
- dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
- ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
- if (ret)
- return;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac frame size, ret = %d\n", ret);
+ return ret;
+ }
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
- dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_PORT_DFX_STS);
+ *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
+ le16_to_cpu(req->max_frm_size));
+ *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
+ req->min_frm_size);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+#define HCLGE_MAC_SPEED_SHIFT 0
+#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
+#define HCLGE_MAC_DUPLEX_SHIFT 7
+
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac speed duplex, ret = %d\n", ret);
+ return ret;
+ }
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup,
+ HCLGE_MAC_DUPLEX_SHIFT));
+ return 0;
+}
+
+static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
if (ret)
- return;
+ return ret;
+
+ ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
+ return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
+}
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u16 qset_id, qset_num;
+ int ret;
+
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
- return;
+ return ret;
- dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ for (qset_id = 0; qset_id < qset_num; qset_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
+ HCLGE_OPC_QSET_DFX_STS);
+ if (ret)
+ return ret;
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
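+ /* the per-qset status bits are packed into the low byte of data[1] */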
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%04u %#x %#x %#x %#x\n",
+ qset_id, req.bit0, req.bit1, req.bit2,
+ req.bit3);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 pri_id, pri_num;
+ int ret;
+
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
- return;
+ return ret;
- dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ for (pri_id = 0; pri_id < pri_num; pri_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
+ HCLGE_OPC_PRI_DFX_STS);
+ if (ret)
+ return ret;
+
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+ pri_id, req.bit0, req.bit1, req.bit2);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 pg_id;
+ int ret;
- ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
+ HCLGE_OPC_PG_DFX_STS);
+ if (ret)
+ return ret;
+
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+ pg_id, req.bit0, req.bit1, req.bit2);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_desc desc;
+ u16 nq_id;
+ int ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
+ HCLGE_OPC_SCH_NQ_CNT);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
+ nq_id, le32_to_cpu(desc.data[1]));
+
+ ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
+ HCLGE_OPC_SCH_RQ_CNT);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ " %#x\n",
+ le32_to_cpu(desc.data[1]));
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 port_id = 0;
+ int ret;
+
+ ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
+ HCLGE_OPC_PORT_DFX_STS);
if (ret)
- return;
-
- dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
- dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
- le32_to_cpu(desc[0].data[3]));
- dev_info(dev, "tx_private_waterline: 0x%x\n",
- le32_to_cpu(desc[0].data[4]));
- dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
- dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
+ return ret;
+
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
+ req.bit0);
+ *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
+ req.bit1);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_desc desc[2];
+ u8 port_id = 0;
+ int ret;
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_CNT);
if (ret)
- return;
+ return ret;
- dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
+ HCLGE_OPC_TM_INTERNAL_STS);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
+ le32_to_cpu(desc[1].data[0]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
+ le32_to_cpu(desc[1].data[1]));
+
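+ /* the TM_INTERNAL_STS_1 registers below are only dumped for non-copper ports */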
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
+ return 0;
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_STS_1);
if (ret)
- return;
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
- dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
- dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
- dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
- le32_to_cpu(desc[0].data[4]));
- dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
- le32_to_cpu(desc[0].data[5]));
+ ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
}
-static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
+static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
+ enum hnae3_dbg_cmd cmd, char *buf, int len)
{
- struct hclge_dbg_reg_type_info *reg_info;
- bool has_dump = false;
+ const struct hclge_dbg_reg_type_info *reg_info;
+ int pos = 0, ret = 0;
int i;
for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
reg_info = &hclge_dbg_reg_info[i];
- if (!strncmp(cmd_buf, reg_info->reg_type,
- strlen(reg_info->reg_type))) {
- hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
- has_dump = true;
+ if (cmd == reg_info->cmd) {
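+ /* TQP registers are dumped once per queue; other register types are dumped as a single set */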
+ if (cmd == HNAE3_DBG_CMD_REG_TQP)
+ return hclge_dbg_dump_reg_tqp(hdev, reg_info,
+ buf, len, &pos);
+
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
+ len, &pos);
+ if (ret)
+ break;
}
}
- if (strncmp(cmd_buf, "dcb", 3) == 0) {
- hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
- has_dump = true;
- }
-
- if (!has_dump) {
- dev_info(&hdev->pdev->dev, "unknown command\n");
- return;
- }
-}
-
-static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
- char *title_buf, char *true_buf,
- char *false_buf)
-{
- if (flag)
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- true_buf);
- else
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- false_buf);
+ return ret;
}
-static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_ets_tc_weight_cmd *ets_weight;
struct hclge_desc desc;
- int i, ret;
+ char *sch_mode_str;
+ int pos = 0;
+ int ret;
+ u8 i;
if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports tc\n");
- return;
+ dev_err(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return -EOPNOTSUPP;
}
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
- return;
+ dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
+ ret);
+ return ret;
}
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump tc\n");
- dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
- ets_weight->weight_offset);
+ pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
+ hdev->tm_info.num_tc);
+ pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
+ ets_weight->weight_offset);
+
+ pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
+ pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
+ i, sch_mode_str,
+ hdev->tm_info.pg_info[0].tc_dwrr[i]);
+ }
- for (i = 0; i < HNAE3_MAX_TC; i++)
- hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
- "tc", "no sp mode", "sp mode");
+ return 0;
}
-static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+static const struct hclge_dbg_item tm_pg_items[] = {
+ { "ID", 2 },
+ { "PRI_MAP", 2 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "C_IR_B", 2 },
+ { "C_IR_U", 2 },
+ { "C_IR_S", 2 },
+ { "C_BS_B", 2 },
+ { "C_BS_S", 2 },
+ { "C_FLAG", 2 },
+ { "C_RATE(Mbps)", 2 },
+ { "P_IR_B", 2 },
+ { "P_IR_U", 2 },
+ { "P_IR_S", 2 },
+ { "P_BS_B", 2 },
+ { "P_BS_S", 2 },
+ { "P_FLAG", 2 },
+ { "P_RATE(Mbps)", 0 }
+};
+
+static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
+ char **result, u8 *index)
{
- struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
- struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
- struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc;
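+ /* append the seven shaper fields (ir_b/u/s, bs_b/s, flag, rate) as successive columns */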
+ sprintf(result[(*index)++], "%3u", para->ir_b);
+ sprintf(result[(*index)++], "%3u", para->ir_u);
+ sprintf(result[(*index)++], "%3u", para->ir_s);
+ sprintf(result[(*index)++], "%3u", para->bs_b);
+ sprintf(result[(*index)++], "%3u", para->bs_s);
+ sprintf(result[(*index)++], "%3u", para->flag);
+ sprintf(result[(*index)++], "%6u", para->rate);
+}
+
+static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
+ char *buf, int len)
+{
+ struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
+ char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
+ u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ int pos = 0;
int ret;
- cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
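+ /* carve the scratch buffer into one fixed-size string slot per column */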
+ for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
+ result[i] = data_str;
+ data_str += HCLGE_DBG_DATA_STR_LEN;
+ }
- pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
- dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
- le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
+ hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
+ NULL, ARRAY_SIZE(tm_pg_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
- cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
+ for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
+ ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
+ if (ret)
+ return ret;
- pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
- dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
- le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
+ ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_PORT_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
+ ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
+ if (ret)
+ return ret;
- port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
- le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
+ ret = hclge_tm_get_pg_shaper(hdev, pg_id,
+ HCLGE_OPC_TM_PG_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
+ ret = hclge_tm_get_pg_shaper(hdev, pg_id,
+ HCLGE_OPC_TM_PG_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ j = 0;
+ sprintf(result[j++], "%02u", pg_id);
+ sprintf(result[j++], "0x%02x", pri_bit_map);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
+ hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_pg_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
- dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
- le32_to_cpu(desc.data[0]));
+ return 0;
+}
- cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
+static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+{
+ char *data_str;
+ int ret;
- dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
- le32_to_cpu(desc.data[0]));
+ data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
+ HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
+ if (!data_str)
+ return -ENOMEM;
- cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
+ ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
- dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
- le32_to_cpu(desc.data[0]));
+ kfree(data_str);
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports tm mapping\n");
- return;
- }
+ return ret;
+}
- cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
-
- bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
- bp_to_qs_map_cmd->tc_id);
- dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
- bp_to_qs_map_cmd->qs_group_id);
- dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
- le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
- return;
-
-err_tm_pg_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
- cmd, ret);
-}
-
-static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
-{
- struct hclge_priority_weight_cmd *priority_weight;
- struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
- struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
- struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
- struct hclge_pri_shapping_cmd *shap_cfg_cmd;
- struct hclge_pg_weight_cmd *pg_weight;
- struct hclge_qs_weight_cmd *qs_weight;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc;
+static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_tm_shaper_para shaper_para;
+ int pos = 0;
int ret;
- cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
if (ret)
- goto err_tm_cmd_send;
+ return ret;
- pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump tm\n");
- dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
- pg_to_pri_map->pg_id);
- dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
- pg_to_pri_map->pri_bit_map);
+ pos += scnprintf(buf + pos, len - pos,
+ "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
- cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
-
- qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
- le16_to_cpu(qs_to_pri_map->qs_id));
- dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
- qs_to_pri_map->priority);
- dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
- qs_to_pri_map->link_vld);
-
- cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ return 0;
+}
- nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
- le16_to_cpu(nq_to_qs_map->nq_id));
- dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
- le16_to_cpu(nq_to_qs_map->qset_id));
+static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
+ char *buf, int len)
+{
+ u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
+ struct hclge_bp_to_qs_map_cmd *map;
+ struct hclge_desc desc;
+ int pos = 0;
+ u8 group_id;
+ u8 grp_num;
+ u16 i = 0;
+ int ret;
- cmd = HCLGE_OPC_TM_PG_WEIGHT;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
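+ /* devices with more TQPs than the V2 maximum use the extended group count */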
+ grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
+ HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
+ map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ for (group_id = 0; group_id < grp_num; group_id++) {
+ hclge_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
+ true);
+ map->tc_id = tc_id;
+ map->qs_group_id = group_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get bp to qset map, ret = %d\n",
+ ret);
+ return ret;
+ }
- pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
- dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+ qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
+ }
- cmd = HCLGE_OPC_TM_QS_WEIGHT;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
+ for (group_id = 0; group_id < grp_num / 8; group_id++) {
+ pos += scnprintf(buf + pos, len - pos,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
+ qset_mapping[i]);
+ i += 8;
+ }
- qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
- le16_to_cpu(qs_weight->qs_id));
- dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+ return pos;
+}
- cmd = HCLGE_OPC_TM_PRI_WEIGHT;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+{
+ u16 queue_id;
+ u16 qset_id;
+ u8 link_vld;
+ int pos = 0;
+ u8 pri_id;
+ u8 tc_id;
+ int ret;
- priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
- dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+ for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
+ ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
+ &link_vld);
+ if (ret)
+ return ret;
- shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
- dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
- le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
+ ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ pos += scnprintf(buf + pos, len - pos,
+ "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
- shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
- dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
- le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
+ if (!hnae3_dev_dcb_supported(hdev))
+ continue;
- hclge_dbg_dump_tm_pg(hdev);
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
+ len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
- return;
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ }
-err_tm_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
- cmd, ret);
+ return 0;
}
-static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
- const char *cmd_buf)
+static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
{
- struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
- struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
- struct hclge_qs_to_pri_link_cmd *map;
- struct hclge_tqp_tx_queue_tc_cmd *tc;
- enum hclge_opcode_type cmd;
+ struct hclge_tm_nodes_cmd *nodes;
struct hclge_desc desc;
- int queue_id, group_id;
- u32 qset_maping[32];
- int tc_id, qset_id;
- int pri_id, ret;
- u32 i;
-
- ret = kstrtouint(cmd_buf, 0, &queue_id);
- queue_id = (ret != 0) ? 0 : queue_id;
+ int pos = 0;
+ int ret;
- cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
- nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_map_cmd_send;
- qset_id = nq_to_qs_map->qset_id & 0x3FF;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tm nodes, ret = %d\n", ret);
+ return ret;
+ }
- cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
- map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- map->qs_id = cpu_to_le16(qset_id);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_map_cmd_send;
- pri_id = map->priority;
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
- cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
- tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- tc->queue_id = cpu_to_le16(queue_id);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
+ pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
+ nodes->pg_base_id, nodes->pg_num);
+ pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
+ nodes->pri_base_id, nodes->pri_num);
+ pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
+
+ return 0;
+}
+
+static const struct hclge_dbg_item tm_pri_items[] = {
+ { "ID", 4 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "C_IR_B", 2 },
+ { "C_IR_U", 2 },
+ { "C_IR_S", 2 },
+ { "C_BS_B", 2 },
+ { "C_BS_S", 2 },
+ { "C_FLAG", 2 },
+ { "C_RATE(Mbps)", 2 },
+ { "P_IR_B", 2 },
+ { "P_IR_U", 2 },
+ { "P_IR_S", 2 },
+ { "P_BS_B", 2 },
+ { "P_BS_S", 2 },
+ { "P_FLAG", 2 },
+ { "P_RATE(Mbps)", 0 }
+};
+
+static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
+ struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
+ char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ u8 pri_num, sch_mode, weight, i, j;
+ int pos, ret;
+
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
- goto err_tm_map_cmd_send;
- tc_id = tc->tc_id & 0x7;
+ return ret;
- dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
- dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
- queue_id, qset_id, pri_id, tc_id);
+ for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
+ result[i] = &data_str[i][0];
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports tm mapping\n");
- return;
- }
+ hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
+ NULL, ARRAY_SIZE(tm_pri_items));
+ pos = scnprintf(buf, len, "%s", content);
- cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
- bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
- for (group_id = 0; group_id < 32; group_id++) {
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- bp_to_qs_map_cmd->tc_id = tc_id;
- bp_to_qs_map_cmd->qs_group_id = group_id;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ for (i = 0; i < pri_num; i++) {
+ ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
if (ret)
- goto err_tm_map_cmd_send;
+ return ret;
- qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
- }
+ ret = hclge_tm_get_pri_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
- dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
- i = 0;
- for (group_id = 0; group_id < 4; group_id++) {
- dev_info(&hdev->pdev->dev,
- "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_maping[(u32)(i + 7)],
- qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
- qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
- qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
- qset_maping[i]);
- i += 8;
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ j = 0;
+ sprintf(result[j++], "%04u", i);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
+ hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
+ hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_pri_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
}
- return;
+ return 0;
+}
+
+static const struct hclge_dbg_item tm_qset_items[] = {
+ { "ID", 4 },
+ { "MAP_PRI", 2 },
+ { "LINK_VLD", 2 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "IR_B", 2 },
+ { "IR_U", 2 },
+ { "IR_S", 2 },
+ { "BS_B", 2 },
+ { "BS_S", 2 },
+ { "FLAG", 2 },
+ { "RATE(Mbps)", 0 }
+};
+
+static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
+ u8 priority, link_vld, sch_mode, weight;
+ struct hclge_tm_shaper_para shaper_para;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ u16 qset_num, i;
+ int ret, pos;
+ u8 j;
+
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
+ result[i] = &data_str[i][0];
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
+ NULL, ARRAY_SIZE(tm_qset_items));
+ pos = scnprintf(buf, len, "%s", content);
+
+ for (i = 0; i < qset_num; i++) {
+ ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ j = 0;
+ sprintf(result[j++], "%04u", i);
+ sprintf(result[j++], "%4u", priority);
+ sprintf(result[j++], "%4u", link_vld);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_qset_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
-err_tm_map_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
- cmd, ret);
+ return 0;
}
-static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
{
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
+ int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n",
- ret);
- return;
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos pause, ret = %d\n", ret);
+ return ret;
}
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
- dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
- pause_param->pause_trans_gap);
- dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
- le16_to_cpu(pause_param->pause_trans_time));
+
+ pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
+ pause_param->pause_trans_gap);
+ pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
+ return 0;
}
-static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+#define HCLGE_DBG_TC_MASK 0x0F
+
+static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
+ int len)
{
+#define HCLGE_DBG_TC_BIT_WIDTH 4
+
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
+ int pos = 0;
+ u8 *pri_tc;
+ u8 tc, i;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "dump qos pri map fail, ret = %d\n", ret);
- return;
+ "failed to dump qos pri map, ret = %d\n", ret);
+ return ret;
}
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump qos pri map\n");
- dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
- dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
- dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
- dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
- dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
- dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
- dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
- dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
- dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
+
+ pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
+ pri_map->vlan_pri);
+ pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+
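+ /* each byte holds two priorities: low nibble for the even one, high nibble for the odd one */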
+ pri_tc = (u8 *)pri_map;
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
+ tc &= HCLGE_DBG_TC_MASK;
+ pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+ /* The low 32 dscp settings use bd0, the high 32 settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ /* Each dscp setting takes 4 bits, so each byte holds two dscp
+ * settings
+ */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
}
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
+ struct hclge_desc desc;
+ int pos = 0;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tx buf, ret = %d\n", ret);
+ return ret;
+ }
+
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ pos += scnprintf(buf + pos, len - pos,
+ "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
- struct hclge_rx_priv_wl_buf *rx_priv_wl;
- struct hclge_rx_com_wl *rx_packet_cnt;
- struct hclge_rx_com_thrd *rx_com_thrd;
- struct hclge_rx_com_wl *rx_com_wl;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc[2];
+ struct hclge_desc desc;
+ int pos = 0;
int i, ret;
- cmd = HCLGE_OPC_TX_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx priv buf, ret = %d\n", ret);
+ return ret;
+ }
- dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
+ pos += scnprintf(buf + pos, len - pos, "\n");
- tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
- cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
+ pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
- dev_info(&hdev->pdev->dev, "\n");
- rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(rx_buf_cmd->buf_num[i]));
+ return pos;
+}
- dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
- le16_to_cpu(rx_buf_cmd->shared_buf));
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_rx_com_wl *rx_com_wl;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx common wl, ret = %d\n", ret);
+ return ret;
+ }
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev, "\n");
- dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_com_wl->com_wl.high),
- le16_to_cpu(rx_com_wl->com_wl.low));
+ rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
+ return pos;
+}
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_packet_cnt->com_wl.high),
- le16_to_cpu(rx_packet_cnt->com_wl.low));
- dev_info(&hdev->pdev->dev, "\n");
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports rx priv wl\n");
- return;
- }
- cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx global pkt cnt, ret = %d\n", ret);
+ return ret;
+ }
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_desc desc[2];
+ int pos = 0;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret)
- goto err_qos_cmd_send;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx priv wl buf, ret = %d\n", ret);
+ return ret;
+ }
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
+ char *buf, int len)
+{
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_desc desc[2];
+ int pos = 0;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret)
- goto err_qos_cmd_send;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx common threshold, ret = %d\n", ret);
+ return ret;
+ }
- dev_info(&hdev->pdev->dev, "\n");
+ pos += scnprintf(buf + pos, len - pos, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
- return;
-err_qos_cmd_send:
- dev_err(&hdev->pdev->dev,
- "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
+ return pos;
}
-static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
+static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ int pos = 0;
+ int ret;
+
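+ /* each helper below returns the number of bytes written, or a negative error */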
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
+ len - pos);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_mac_ethertype_idx_rd_cmd *req0;
- char printf_buf[HCLGE_DBG_BUF_LEN];
struct hclge_desc desc;
+ u32 msg_egress_port;
+ int pos = 0;
int ret, i;
- dev_info(&hdev->pdev->dev, "mng tab:\n");
- memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
- strncat(printf_buf,
- "entry|mac_addr |mask|ether|mask|vlan|mask",
- HCLGE_DBG_BUF_LEN - 1);
- strncat(printf_buf + strlen(printf_buf),
- "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
- HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);
-
- dev_info(&hdev->pdev->dev, "%s", printf_buf);
+ pos += scnprintf(buf + pos, len - pos,
+ "entry mac_addr mask ether ");
+ pos += scnprintf(buf + pos, len - pos,
+ "mask vlan mask i_map i_dir e_type ");
+ pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
@@ -846,89 +1451,95 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "call hclge_cmd_send fail, ret = %d\n", ret);
- return;
+ "failed to dump manage table, ret = %d\n", ret);
+ return ret;
}
if (!req0->resp_code)
continue;
- memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
- snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
- "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- le16_to_cpu(req0->index),
- req0->mac_addr[0], req0->mac_addr[1],
- req0->mac_addr[2], req0->mac_addr[3],
- req0->mac_addr[4], req0->mac_addr[5]);
-
- snprintf(printf_buf + strlen(printf_buf),
- HCLGE_DBG_BUF_LEN - strlen(printf_buf),
- "%x |%04x |%x |%04x|%x |%02x |%02x |",
- !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- req0->ethter_type,
- !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
- !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
- req0->i_port_bitmap, req0->i_port_direction);
-
- snprintf(printf_buf + strlen(printf_buf),
- HCLGE_DBG_BUF_LEN - strlen(printf_buf),
- "%d |%d |%02d |%04d|%x\n",
- !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- req0->egress_port & HCLGE_DBG_MNG_PF_ID,
- (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- req0->egress_queue,
- !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));
-
- dev_info(&hdev->pdev->dev, "%s", printf_buf);
+ pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
+
+ msg_egress_port = le16_to_cpu(req0->egress_port);
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
}
+
+ return 0;
}
-static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
- bool sel_x, u32 loc)
+#define HCLGE_DBG_TCAM_BUF_SIZE 256
+
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
+ char *tcam_buf,
+ struct hclge_dbg_tcam_msg tcam_msg)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
+ int pos = 0;
int ret, i;
u32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
- req1->stage = stage;
+ req1->stage = tcam_msg.stage;
req1->xy_sel = sel_x ? 1 : 0;
- req1->index = cpu_to_le32(loc);
+ req1->index = cpu_to_le32(tcam_msg.loc);
ret = hclge_cmd_send(&hdev->hw, desc, 3);
if (ret)
return ret;
- dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
- sel_x ? "x" : "y", loc);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
+ tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
- dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", *req++);
/* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
- dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", *req++);
/* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
- dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", *req++);
return ret;
}
@@ -946,369 +1557,1013 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
}
spin_unlock_bh(&hdev->fd_rule_lock);
- if (cnt != hdev->hclge_fd_rule_num)
+ if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
return -EINVAL;
return cnt;
}
-static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
+static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
{
+ u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
+ char *tcam_buf;
+ int pos = 0;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
- return;
+ return -EOPNOTSUPP;
}
- if (!hdev->hclge_fd_rule_num ||
- !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
- return;
+ if (!hdev->hclge_fd_rule_num || !rule_num)
+ return 0;
- rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
- sizeof(u16), GFP_KERNEL);
+ rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
if (!rule_locs)
- return;
+ return -ENOMEM;
+
+ tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
+ if (!tcam_buf) {
+ kfree(rule_locs);
+ return -ENOMEM;
+ }
rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
- if (rule_cnt <= 0) {
+ if (rule_cnt < 0) {
+ ret = rule_cnt;
dev_err(&hdev->pdev->dev,
- "failed to get rule number, ret = %d\n", rule_cnt);
- kfree(rule_locs);
- return;
+ "failed to get rule number, ret = %d\n", ret);
+ goto out;
}
+ ret = 0;
for (i = 0; i < rule_cnt; i++) {
- ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
+ tcam_msg.stage = HCLGE_FD_STAGE_1;
+ tcam_msg.loc = rule_locs[i];
+
+ ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
- kfree(rule_locs);
- return;
+ goto out;
}
- ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
+ pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
+
+ ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
- kfree(rule_locs);
- return;
+ goto out;
}
+
+ pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
}
+out:
+ kfree(tcam_buf);
kfree(rule_locs);
+ return ret;
}
-void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
-{
- dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
- hdev->rst_stats.pf_rst_cnt);
- dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
- hdev->rst_stats.flr_rst_cnt);
- dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
- hdev->rst_stats.global_rst_cnt);
- dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
- hdev->rst_stats.imp_rst_cnt);
- dev_info(&hdev->pdev->dev, "reset done count: %u\n",
- hdev->rst_stats.reset_done_cnt);
- dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
- hdev->rst_stats.hw_reset_done_cnt);
- dev_info(&hdev->pdev->dev, "reset count: %u\n",
- hdev->rst_stats.reset_cnt);
- dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
- hdev->rst_stats.reset_fail_cnt);
- dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
- dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
- dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
- dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
- dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
- dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
- dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
-}
-
-static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
-{
- dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
- hdev->last_serv_processed);
- dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
- hdev->serv_processed_cnt);
-}
-
-static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
-{
- struct hclge_desc *desc_src, *desc_tmp;
- struct hclge_get_m7_bd_cmd *req;
+static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ struct hclge_fd_ad_cnt_read_cmd *req;
+ char str_id[HCLGE_DBG_ID_LEN];
struct hclge_desc desc;
- u32 bd_num, buf_len;
- int ret, i;
+ int pos = 0;
+ int ret;
+ u64 cnt;
+ u8 i;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "func_id\thit_times\n");
+
+ for (i = 0; i < func_num; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
+ req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
+ req->index = cpu_to_le16(i);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
+ ret);
+ return ret;
+ }
+ cnt = le64_to_cpu(req->cnt);
+ hclge_dbg_get_func_id_str(str_id, i);
+ pos += scnprintf(buf + pos, len - pos,
+ "%s\t%llu\n", str_id, cnt);
+ }
+
+ return 0;
+}
+
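+/* reset related status registers, dumped by name in hclge_dbg_dump_rst_info() */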
+static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
+ {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
+ {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
+ {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
+ {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
+ {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
+ {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
+ {HCLGE_FUN_RST_ING, "function reset status"}
+};
+
+int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ u32 i, offset;
+ int pos = 0;
+
+ pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
+ hdev->rst_stats.pf_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
+ hdev->rst_stats.imp_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
+ hdev->rst_stats.reset_done_cnt);
+ pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
+ hdev->rst_stats.reset_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
+ hdev->rst_stats.reset_fail_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
+ hdev->state);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ unsigned long rem_nsec;
+ int pos = 0;
+ u64 lc;
+
+ lc = local_clock();
+ rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
+
+ pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ pos += scnprintf(buf + pos, len - pos,
+ "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
+ hdev->serv_processed_cnt);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+
+ pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
+ hdev->num_nic_msi);
+ pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
+ hdev->num_roce_msi);
+ pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
+ hdev->num_msi_used);
+ pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
+ hdev->num_msi_left);
+
+ return 0;
+}
+
+static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
+ char *buf, int len, u32 bd_num)
+{
+#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
+
+ struct hclge_desc *desc_index = desc_src;
+ u32 offset = 0;
+ int pos = 0;
+ u32 i, j;
+
+ pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+
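+ /* each line shows the running byte offset followed by two consecutive
+ * 32-bit words of descriptor data
+ */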
+ for (i = 0; i < bd_num; i++) {
+ j = 0;
+ while (j < HCLGE_DESC_DATA_LEN - 1) {
+ pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
+ offset);
+ pos += scnprintf(buf + pos, len - pos, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
+ offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
+ }
+ desc_index++;
+ }
+}
+
+static int
+hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_get_imp_bd_cmd *req;
+ struct hclge_desc *desc_src;
+ struct hclge_desc desc;
+ u32 bd_num;
+ int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
- req = (struct hclge_get_m7_bd_cmd *)desc.data;
+ req = (struct hclge_get_imp_bd_cmd *)desc.data;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "get firmware statistics bd number failed, ret = %d\n",
+ "failed to get imp statistics bd number, ret = %d\n",
ret);
- return;
+ return ret;
}
bd_num = le32_to_cpu(req->bd_num);
-
- buf_len = sizeof(struct hclge_desc) * bd_num;
- desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev,
- "allocate desc for get_m7_stats failed\n");
- return;
+ if (!bd_num) {
+ dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+ return -EINVAL;
}
- desc_tmp = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
- HCLGE_OPC_M7_STATS_INFO);
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
+ ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
+ HCLGE_OPC_IMP_STATS_INFO);
if (ret) {
kfree(desc_src);
dev_err(&hdev->pdev->dev,
- "get firmware statistics failed, ret = %d\n", ret);
- return;
+ "failed to get imp statistics, ret = %d\n", ret);
+ return ret;
}
- for (i = 0; i < bd_num; i++) {
- dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
- le32_to_cpu(desc_tmp->data[0]),
- le32_to_cpu(desc_tmp->data[1]),
- le32_to_cpu(desc_tmp->data[2]));
- dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
- le32_to_cpu(desc_tmp->data[3]),
- le32_to_cpu(desc_tmp->data[4]),
- le32_to_cpu(desc_tmp->data[5]));
-
- desc_tmp++;
- }
+ hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
kfree(desc_src);
+
+ return 0;
}
#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
-static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
- struct hclge_desc *desc, int *offset,
- int *length)
+static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
+ char *buf, int len, int *pos)
{
#define HCLGE_CMD_DATA_NUM 6
- int i;
- int j;
+ int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
+ int i, j;
for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
if (i == 0 && j == 0)
continue;
- dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
- *offset,
- le32_to_cpu(desc[i].data[j]));
- *offset += sizeof(u32);
- *length -= sizeof(u32);
- if (*length <= 0)
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
+
+ offset += sizeof(u32);
+ *index -= sizeof(u32);
+
+ if (*index <= 0)
return;
}
}
}
-/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
- * @hdev: pointer to struct hclge_dev
- * @cmd_buf: string that contains offset and length
- */
-static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
- const char *cmd_buf)
+static int
+hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
{
-#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
-#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
- int offset;
- int length;
- int data0;
+ int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
+ int pos = 0;
+ u32 data0;
int ret;
- ret = sscanf(cmd_buf, "%x %x", &offset, &length);
- if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
- length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
- dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
- return;
- }
- if (offset < 0 || length <= 0) {
- dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
- return;
- }
-
- dev_info(&hdev->pdev->dev, "offset | data\n");
+ pos += scnprintf(buf + pos, len - pos, "offset | data\n");
- while (length > 0) {
- data0 = offset;
- if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
- data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
+ while (index > 0) {
+ data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
+ if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
+ data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
else
- data0 |= length << 16;
+ data0 |= (u32)index << 16;
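+ /* data0: low 16 bits carry the current read offset, high 16 bits the
+ * length to fetch with this command
+ */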
ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
HCLGE_OPC_QUERY_NCL_CONFIG);
if (ret)
- return;
+ return ret;
- hclge_ncl_config_data_print(hdev, desc, &offset, &length);
+ hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
}
+
+ return 0;
+}
+
+static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ struct hclge_config_mac_mode_cmd *req_app;
+ struct hclge_common_lb_cmd *req_common;
+ struct hclge_desc desc;
+ u8 loopback_en;
+ int pos = 0;
+ int ret;
+
+ req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
+ req_common = (struct hclge_common_lb_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
+ hdev->hw.mac.mac_id);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump app loopback status, ret = %d\n", ret);
+ return ret;
+ }
+
+ loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
+ HCLGE_MAC_APP_LP_B);
+ pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
+ state_str[loopback_en]);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump common loopback status, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
+ state_str[loopback_en]);
+
+ loopback_en = req_common->enable &
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
+ state_str[loopback_en]);
+
+ if (phydev) {
+ loopback_en = phydev->loopback_enabled;
+ pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
+ state_str[loopback_en]);
+ } else if (hnae3_dev_phy_imp_supported(hdev)) {
+ loopback_en = req_common->enable &
+ HCLGE_CMD_GE_PHY_INNER_LOOP_B;
+ pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
+ state_str[loopback_en]);
+ }
+
+ return 0;
}
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
-static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
+static int
+hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
{
-#define HCLGE_BILLION_NANO_SECONDS 1000000000
-
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
+ int pos = 0;
- dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
- (unsigned long)stats.time, rem_nsec / 1000,
- stats.status);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
+ }
+
+ return 0;
+}
+
+static const struct hclge_dbg_item mac_list_items[] = {
+ { "FUNC_ID", 2 },
+ { "MAC_ADDR", 12 },
+ { "STATE", 2 },
+};
+
+static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
+ bool is_unicast)
+{
+ char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
+ char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
+ char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_vport *vport;
+ struct list_head *list;
+ u32 func_id;
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
+ result[i] = &data_str[i][0];
+
+ pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
+ is_unicast ? "UC" : "MC");
+ hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
+ NULL, ARRAY_SIZE(mac_list_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
+ vport = &hdev->vport[func_id];
+ list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+ spin_lock_bh(&vport->mac_list_lock);
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ i = 0;
+ result[i++] = hclge_dbg_get_func_id_str(str_id,
+ func_id);
+ sprintf(result[i++], "%pM", mac_node->mac_addr);
+ sprintf(result[i++], "%5s",
+ hclge_mac_state_str[mac_node->state]);
+ hclge_dbg_fill_content(content, sizeof(content),
+ mac_list_items,
+ (const char **)result,
+ ARRAY_SIZE(mac_list_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+ spin_unlock_bh(&vport->mac_list_lock);
+ }
+}
+
+static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 func_num = pci_num_vf(hdev->pdev) + 1;
+ struct hclge_vport *vport;
+ int pos = 0;
+ u8 i;
+
+ pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
+ hdev->num_alloc_vport);
+ pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
+ hdev->max_umv_size);
+ pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
+ hdev->wanted_umv_size);
+ pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
+ hdev->priv_umv_size);
+
+ mutex_lock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
+ hdev->share_umv_size);
+ for (i = 0; i < func_num; i++) {
+ vport = &hdev->vport[i];
+ pos += scnprintf(buf + pos, len - pos,
+ "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
}
+ mutex_unlock(&hdev->vport_lock);
+
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
+ hdev->used_mc_mac_num);
+
+ return 0;
}
-static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
+static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
{
- struct hclge_qs_shapping_cmd *shap_cfg_cmd;
- u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
struct hclge_desc desc;
- u32 shapping_para;
+ u16 bmap_index;
+ u8 rx_cfg;
int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
- shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
- shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
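+ /* select the command block covering this vf_id and set its bit in the vf bitmap */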
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "qs%u failed to get tx_rate, ret=%d\n",
- qsid, ret);
- return;
+ "failed to get vport%u rxvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
}
- shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
- ir_b = hclge_tm_get_field(shapping_para, IR_B);
- ir_u = hclge_tm_get_field(shapping_para, IR_U);
- ir_s = hclge_tm_get_field(shapping_para, IR_S);
- bs_b = hclge_tm_get_field(shapping_para, BS_B);
- bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ rx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
+ vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
+ vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
+ vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
+ vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
+ vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
- dev_info(&hdev->pdev->dev,
- "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
- qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
+ return 0;
}
-static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
+static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
{
- struct hnae3_knic_private_info *kinfo;
- struct hclge_vport *vport;
- int vport_id, i;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ u8 tx_cfg;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u txvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
+ }
- for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
- vport = &hdev->vport[vport_id];
- kinfo = &vport->nic.kinfo;
+ tx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
- dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
+ vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
+ vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
+ vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
+ vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
+ vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
+ vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
+ vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
- for (i = 0; i < kinfo->num_tc; i++) {
- u16 qsid = vport->qs_offset + i;
+ return 0;
+}
- hclge_dbg_dump_qs_shaper_single(hdev, qsid);
- }
- }
+static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
+ u8 vlan_type, u8 vf_id,
+ struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
+ req->vlan_type = vlan_type;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
}
-static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
- const char *cmd_buf)
+static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
+ u8 vf_id, u8 *vlan_fe)
{
-#define HCLGE_MAX_QSET_NUM 1024
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
+ if (ret)
+ return ret;
- u16 qsid;
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
+ *vlan_fe = req->vlan_fe;
+
+ return 0;
+}
+
+static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
+ u8 vf_id, u8 *bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
int ret;
- ret = kstrtou16(cmd_buf, 0, &qsid);
+ if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- hclge_dbg_dump_qs_shaper_all(hdev);
- return;
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+ return ret;
}
- if (qsid >= HCLGE_MAX_QSET_NUM) {
- dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
- qsid);
- return;
+ *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
+
+ return 0;
+}
+
+static const struct hclge_dbg_item vlan_filter_items[] = {
+ { "FUNC_ID", 2 },
+ { "I_VF_VLAN_FILTER", 2 },
+ { "E_VF_VLAN_FILTER", 2 },
+ { "PORT_VLAN_FILTER_BYPASS", 0 }
+};
+
+static const struct hclge_dbg_item vlan_offload_items[] = {
+ { "FUNC_ID", 2 },
+ { "PVID", 4 },
+ { "ACCEPT_TAG1", 2 },
+ { "ACCEPT_TAG2", 2 },
+ { "ACCEPT_UNTAG1", 2 },
+ { "ACCEPT_UNTAG2", 2 },
+ { "INSERT_TAG1", 2 },
+ { "INSERT_TAG2", 2 },
+ { "SHIFT_TAG", 2 },
+ { "STRIP_TAG1", 2 },
+ { "STRIP_TAG2", 2 },
+ { "DROP_TAG1", 2 },
+ { "DROP_TAG2", 2 },
+ { "PRI_ONLY_TAG1", 2 },
+ { "PRI_ONLY_TAG2", 0 }
+};
+
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_filter_items)];
+ u8 i, j, vlan_fe, bypass, ingress, egress;
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ int ret;
+
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
+ &vlan_fe);
+ if (ret)
+ return ret;
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+
+ *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
+ state_str[ingress]);
+ *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
+ state_str[egress]);
+
+ hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
+ NULL, ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
+ &vlan_fe);
+ if (ret)
+ return ret;
+
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+ ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
+ if (ret)
+ return ret;
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = state_str[ingress];
+ result[j++] = state_str[egress];
+ result[j++] =
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_filter_items, result,
+ ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
}
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
- hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+ return 0;
}
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
{
-#define DUMP_REG "dump reg"
-#define DUMP_TM_MAP "dump tm map"
+ char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_offload_items)];
+ char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ struct hclge_dbg_vlan_cfg vlan_cfg;
+ int ret;
+ u8 i, j;
+
+ hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
+ NULL, ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
+
+ sprintf(str_pvid, "%u", vlan_cfg.pvid);
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = str_pvid;
+ result[j++] = state_str[vlan_cfg.accept_tag1];
+ result[j++] = state_str[vlan_cfg.accept_tag2];
+ result[j++] = state_str[vlan_cfg.accept_untag1];
+ result[j++] = state_str[vlan_cfg.accept_untag2];
+ result[j++] = state_str[vlan_cfg.insert_tag1];
+ result[j++] = state_str[vlan_cfg.insert_tag2];
+ result[j++] = state_str[vlan_cfg.shift_tag];
+ result[j++] = state_str[vlan_cfg.strip_tag1];
+ result[j++] = state_str[vlan_cfg.strip_tag2];
+ result[j++] = state_str[vlan_cfg.drop_tag1];
+ result[j++] = state_str[vlan_cfg.drop_tag2];
+ result[j++] = state_str[vlan_cfg.pri_only1];
+ result[j++] = state_str[vlan_cfg.pri_only2];
+
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_offload_items, result,
+ ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+}
+
+static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+ u32 sw_cfg = ptp->ptp_cfg;
+ unsigned int tx_start;
+ unsigned int last_rx;
+ int pos = 0;
+ u32 hw_cfg;
+ int ret;
+
+ pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
+ ptp->info.name);
+ pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
+ "yes" : "no");
+
+ last_rx = jiffies_to_msecs(ptp->last_rx);
+ pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+
+ tx_start = jiffies_to_msecs(ptp->tx_start);
+ pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
+ pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
+ ptp->tx_skipped);
+ pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
+ ptp->tx_timeout);
+ pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
+ ptp->last_tx_seqid);
+
+ ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
+ sw_cfg, hw_cfg);
+
+ pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
+{
+ hclge_dbg_dump_mac_list(hdev, buf, len, true);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+{
+ hclge_dbg_dump_mac_list(hdev, buf, len, false);
+
+ return 0;
+}
+
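+/* map each debugfs command to its dump handler; entries that only set
+ * .dbg_dump_reg are served by the common register dump helper
+ */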
+static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
+ {
+ .cmd = HNAE3_DBG_CMD_TM_NODES,
+ .dbg_dump = hclge_dbg_dump_tm_nodes,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PRI,
+ .dbg_dump = hclge_dbg_dump_tm_pri,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_QSET,
+ .dbg_dump = hclge_dbg_dump_tm_qset,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_MAP,
+ .dbg_dump = hclge_dbg_dump_tm_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PG,
+ .dbg_dump = hclge_dbg_dump_tm_pg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PORT,
+ .dbg_dump = hclge_dbg_dump_tm_port,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
+ .dbg_dump = hclge_dbg_dump_tc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
+ .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
+ .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_UC,
+ .dbg_dump = hclge_dbg_dump_mac_uc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_MC,
+ .dbg_dump = hclge_dbg_dump_mac_mc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MNG_TBL,
+ .dbg_dump = hclge_dbg_dump_mng_table,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_LOOPBACK,
+ .dbg_dump = hclge_dbg_dump_loopback,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_PTP_INFO,
+ .dbg_dump = hclge_dbg_dump_ptp_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
+ .dbg_dump = hclge_dbg_dump_interrupt,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_RESET_INFO,
+ .dbg_dump = hclge_dbg_dump_rst_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_IMP_INFO,
+ .dbg_dump = hclge_dbg_get_imp_stats_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
+ .dbg_dump = hclge_dbg_dump_ncl_config,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RPU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_NCSI,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RTC,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_PPP,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RCB,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_TQP,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_MAC,
+ .dbg_dump = hclge_dbg_dump_mac,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_DCB,
+ .dbg_dump = hclge_dbg_dump_dcb,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_FD_TCAM,
+ .dbg_dump = hclge_dbg_dump_fd_tcam,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
+ .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_SERV_INFO,
+ .dbg_dump = hclge_dbg_dump_serv_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
+ .dbg_dump = hclge_dbg_dump_vlan_config,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_FD_COUNTER,
+ .dbg_dump = hclge_dbg_dump_fd_counter,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_UMV_INFO,
+ .dbg_dump = hclge_dbg_dump_umv_info,
+ },
+};
+
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ char *buf, int len)
+{
struct hclge_vport *vport = hclge_get_vport(handle);
+ const struct hclge_dbg_func *cmd_func;
struct hclge_dev *hdev = vport->back;
+ u32 i;
- if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
- hclge_dbg_fd_tcam(hdev);
- } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
- hclge_dbg_dump_tc(hdev);
- } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
- hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
- } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
- hclge_dbg_dump_tm(hdev);
- } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
- hclge_dbg_dump_qos_pause_cfg(hdev);
- } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
- hclge_dbg_dump_qos_pri_map(hdev);
- } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
- hclge_dbg_dump_qos_buf_cfg(hdev);
- } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
- hclge_dbg_dump_mng_table(hdev);
- } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
- hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
- } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
- hclge_dbg_dump_rst_info(hdev);
- } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
- hclge_dbg_dump_serv_info(hdev);
- } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
- hclge_dbg_get_m7_stats_info(hdev);
- } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
- hclge_dbg_dump_ncl_config(hdev,
- &cmd_buf[sizeof("dump ncl_config")]);
- } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
- hclge_dbg_dump_mac_tnl_status(hdev);
- } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
- hclge_dbg_dump_qs_shaper(hdev,
- &cmd_buf[sizeof("dump qs shaper")]);
- } else {
- dev_info(&hdev->pdev->dev, "unknown command\n");
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
+ if (cmd == hclge_dbg_cmd_func[i].cmd) {
+ cmd_func = &hclge_dbg_cmd_func[i];
+ if (cmd_func->dbg_dump)
+ return cmd_func->dbg_dump(hdev, buf, len);
+ else
+ return cmd_func->dbg_dump_reg(hdev, cmd, buf,
+ len);
+ }
}
- return 0;
+ dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
+ return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 38b79321c4c4..724052928b88 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include "hclge_cmd.h"
-#define HCLGE_DBG_BUF_LEN 256
#define HCLGE_DBG_MNG_TBL_MAX 64
#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
@@ -35,8 +34,6 @@
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
-#pragma pack(1)
-
struct hclge_qos_pri_map_cmd {
u8 pri0_tc : 4,
pri1_tc : 4;
@@ -72,6 +69,11 @@ struct hclge_dbg_reg_common_msg {
enum hclge_opcode_type cmd;
};
+struct hclge_dbg_tcam_msg {
+ u8 stage;
+ u32 loc;
+};
+
#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
struct hclge_dbg_dfx_message {
int flag;
@@ -80,14 +82,24 @@ struct hclge_dbg_dfx_message {
#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
struct hclge_dbg_reg_type_info {
- const char *reg_type;
- struct hclge_dbg_dfx_message *dfx_msg;
+ enum hnae3_dbg_cmd cmd;
+ const struct hclge_dbg_dfx_message *dfx_msg;
struct hclge_dbg_reg_common_msg reg_msg;
};
-#pragma pack()
+struct hclge_dbg_func {
+ enum hnae3_dbg_cmd cmd;
+ int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
+ int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
+ char *buf, int len);
+};
-static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+struct hclge_dbg_status_dfx_info {
+ u32 offset;
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
{true, "DFX_MSIX_INFO_NIC_0"},
@@ -103,7 +115,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
{true, "SSU_ETS_PORT_STATUS"},
{true, "SSU_ETS_TCG_STATUS"},
@@ -175,7 +187,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{true, "prt_id"},
{true, "PACKET_TC_CURR_BUFFER_CNT_0"},
{true, "PACKET_TC_CURR_BUFFER_CNT_1"},
@@ -282,7 +294,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{true, "OQ_INDEX"},
{true, "QUEUE_CNT"},
{false, "Reserved"},
@@ -291,7 +303,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "prt_id"},
{true, "IGU_RX_ERR_PKT"},
{true, "IGU_RX_NO_SOF_PKT"},
@@ -314,10 +326,10 @@ static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "IGU_RX_OUT_UDP0_PKT"},
{true, "IGU_RX_IN_UDP0_PKT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
{false, "Reserved"},
{true, "IGU_RX_OVERSIZE_PKT_L"},
@@ -356,7 +368,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "tc_queue_num"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -365,7 +377,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "BUF_WAIT_TIMEOUT_QID"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
{true, "FIFO_DFX_ST0"},
{true, "FIFO_DFX_ST1"},
@@ -381,7 +393,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{false, "Reserved"},
{true, "NCSI_EGU_TX_FIFO_STS"},
{true, "NCSI_PAUSE_STATUS"},
@@ -453,7 +465,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{true, "NCSI_MAC_RX_PAUSE_FRAMES"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
{true, "LGE_IGU_AFIFO_DFX_0"},
{true, "LGE_IGU_AFIFO_DFX_1"},
@@ -483,7 +495,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
{true, "DROP_FROM_PRT_PKT_CNT"},
{true, "DROP_FROM_HOST_PKT_CNT"},
@@ -639,7 +651,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -711,7 +723,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
{true, "q_num"},
{true, "RCB_CFG_RX_RING_TAIL"},
{true, "RCB_CFG_RX_RING_HEAD"},
@@ -727,4 +739,36 @@ static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
{true, "RCB_CFG_TX_RING_EBDNUM"},
};
+#define HCLGE_DBG_INFO_LEN 256
+#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256
+#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512
+#define HCLGE_DBG_ID_LEN 16
+#define HCLGE_DBG_ITEM_NAME_LEN 32
+#define HCLGE_DBG_DATA_STR_LEN 32
+#define HCLGE_DBG_TM_INFO_LEN 256
+
+#define HCLGE_BILLION_NANO_SECONDS 1000000000
+
+struct hclge_dbg_item {
+ char name[HCLGE_DBG_ITEM_NAME_LEN];
+ u16 interval; /* blank numbers after the item */
+};
+
+struct hclge_dbg_vlan_cfg {
+ u16 pvid;
+ u8 accept_tag1;
+ u8 accept_tag2;
+ u8 accept_untag1;
+ u8 accept_untag2;
+ u8 insert_tag1;
+ u8 insert_tag2;
+ u8 shift_tag;
+ u8 strip_tag1;
+ u8 strip_tag2;
+ u8 drop_tag1;
+ u8 drop_tag2;
+ u8 pri_only1;
+ u8 pri_only2;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
new file mode 100644
index 000000000000..4c441e6a5082
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclge_devlink.h"
+
+static int hclge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGE_DEVLINK_FW_STRING_LEN 32
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (ret)
+ return ret;
+
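+ /* firmware version is stored as four packed byte fields in hdev->fw_version */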
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclge_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclge_devlink_ops = {
+ .info_get = hclge_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclge_devlink_reload_down,
+ .reload_up = hclge_devlink_reload_up,
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_devlink_priv *priv;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&hclge_devlink_ops,
+ sizeof(struct hclge_devlink_priv), &pdev->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+ hdev->devlink = devlink;
+
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
+ return 0;
+}
+
+void hclge_devlink_uninit(struct hclge_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
new file mode 100644
index 000000000000..918be04507a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEVLINK_H
+#define __HCLGE_DEVLINK_H
+
+#include "hclge_main.h"
+
+struct hclge_devlink_priv {
+ struct hclge_dev *hdev;
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev);
+void hclge_devlink_uninit(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index c85b72dc44d2..6efd768cc07c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -4,468 +4,895 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "imp_itcm0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "imp_itcm1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "imp_itcm2_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "imp_itcm3_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "imp_itcm4_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
- { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(6),
+ .msg = "tqp_int_cfg_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "tx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "rx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "msix_nic_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "msix_rocee_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_igu_int[] = {
- { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "igu_rx_buf0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "igu_rx_buf1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "rx_buf_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "tx_buf_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "tx_buf_underrun",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "rx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rx_stp_fifo_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "rx_stp_fifo_underflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tx_buf_underrun",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "rx_stp_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
- { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "ncsi_tx_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
- { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "umv_mcast_group_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "umv_key_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "umv_key_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "umv_key_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "umv_key_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "umv_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "rss_tc_mode_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "rss_idt_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "rss_idt_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "rss_idt_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "rss_idt_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "rss_idt_mem4_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "rss_idt_mem5_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "rss_idt_mem6_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "rss_idt_mem7_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "rss_idt_mem8_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "rss_idt_mem9_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "rss_idt_mem10_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "rss_idt_mem11_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "rss_idt_mem12_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "rss_idt_mem13_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "rss_idt_mem14_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "rss_idt_mem15_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "port_vlan_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "mcast_linear_table_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "mcast_result_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "tx_vlan_tag_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rss_list_tc_unassigned_queue_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "hfs_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "FD_CN0_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "FD_CN1_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "GRO_AD_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_tm_sch_rint[] = {
- { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "tm_sch_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "tm_sch_rq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "tm_sch_rq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "tm_sch_nq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "tm_sch_nq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "tm_sch_roce_up_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "tm_sch_roce_up_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "tm_sch_rcb_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "tm_sch_rcb_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "tm_sch_ssu_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "tm_sch_ssu_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
- { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "qcn_byte_info_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "qcn_byte_info_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
- { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "qcn_byte_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "qcn_time_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "qcn_fb_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "qcn_link_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qcn_rate_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "qcn_tmplt_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "egu_cge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "egu_cge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "egu_lge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "egu_lge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "cge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "lge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "lge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "cge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "lge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "egu_cge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "egu_lge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "egu_ge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "ge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
- { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(26), .msg = "rd_bus_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(27), .msg = "wr_bus_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(28), .msg = "reg_search_miss",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(29), .msg = "rx_q_search_miss",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(13),
+ .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "rcb_tx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "rcb_rx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "rcb_tx_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "rcb_rx_ebd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "rcb_tso_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "rcb_tx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "rcb_rx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "rd_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "wr_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "reg_search_miss",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "rx_q_search_miss",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "ooo_ecc_err_detect",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "ooo_ecc_err_multpl",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(4),
+ .msg = "gro_bd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "gro_context_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "rx_stash_cfg_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "axi_rd_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(5), .msg = "buf_wait_timeout",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "over_8bd_no_fe",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "tso_mss_cmp_min_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tso_mss_cmp_max_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tx_rd_fbd_poison",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "rx_rd_ebd_poison",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "buf_wait_timeout",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
- { .int_msk = BIT(0), .msg = "buf_sum_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(2), .msg = "ppp_mbid_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "cks_edit_position_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "vlan_num_in_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "buf_sum_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ppp_mb_num_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ppp_mbid_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ppp_rlt_mac_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "ppp_rlt_host_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cks_edit_position_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "cks_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "vlan_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "vlan_num_ot_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "vlan_num_in_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
#define HCLGE_SSU_MEM_ECC_ERR(x) \
- { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
- .reset_level = HNAE3_GLOBAL_RESET }
+{ \
+ .int_msk = BIT(x), \
+ .msg = "ssu_mem" #x "_ecc_mbit_err", \
+ .reset_level = HNAE3_GLOBAL_RESET \
+}
static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
HCLGE_SSU_MEM_ECC_ERR(0),
@@ -504,131 +931,409 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
};
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "tpu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "igu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "roc_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tpu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "igu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "roc_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tpu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "igu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "ets_rd_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "ets_wr_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "ets_rd_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "ets_wr_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
- { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "ig_host_inf_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "ig_mac_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ig_host_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ig_roc_buf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ig_host_data_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "ig_host_key_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "tx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "rx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "rx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qm_eof_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "mb_rlt_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "dup_uncopy_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "dup_cnt_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "dup_cnt_drop_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "dup_cnt_wrb_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "host_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "mac_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "host_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "mac_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "dup_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "out_queue_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "bank2_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "bank1_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "bank0_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
- { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "ets_rd_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ets_wr_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ets_rd_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ets_wr_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(9), .msg = "low_water_line_err_port",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "low_water_line_err_port",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "hi_water_line_err_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
- { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
- { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
- { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
- { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
- { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
- { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
- { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
- { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
- { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
- { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
- { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
- { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
- { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
- { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
- { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
- { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
- { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
- { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
- { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
- { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
- { /* sentinel */ }
+ {
+ .int_msk = 0,
+ .msg = "rocee qmm ovf: sgid invalid err"
+ }, {
+ .int_msk = 0x4,
+ .msg = "rocee qmm ovf: sgid ovf err"
+ }, {
+ .int_msk = 0x8,
+ .msg = "rocee qmm ovf: smac invalid err"
+ }, {
+ .int_msk = 0xC,
+ .msg = "rocee qmm ovf: smac ovf err"
+ }, {
+ .int_msk = 0x10,
+ .msg = "rocee qmm ovf: cqc invalid err"
+ }, {
+ .int_msk = 0x11,
+ .msg = "rocee qmm ovf: cqc ovf err"
+ }, {
+ .int_msk = 0x12,
+ .msg = "rocee qmm ovf: cqc hopnum err"
+ }, {
+ .int_msk = 0x13,
+ .msg = "rocee qmm ovf: cqc ba0 err"
+ }, {
+ .int_msk = 0x14,
+ .msg = "rocee qmm ovf: srqc invalid err"
+ }, {
+ .int_msk = 0x15,
+ .msg = "rocee qmm ovf: srqc ovf err"
+ }, {
+ .int_msk = 0x16,
+ .msg = "rocee qmm ovf: srqc hopnum err"
+ }, {
+ .int_msk = 0x17,
+ .msg = "rocee qmm ovf: srqc ba0 err"
+ }, {
+ .int_msk = 0x18,
+ .msg = "rocee qmm ovf: mpt invalid err"
+ }, {
+ .int_msk = 0x19,
+ .msg = "rocee qmm ovf: mpt ovf err"
+ }, {
+ .int_msk = 0x1A,
+ .msg = "rocee qmm ovf: mpt hopnum err"
+ }, {
+ .int_msk = 0x1B,
+ .msg = "rocee qmm ovf: mpt ba0 err"
+ }, {
+ .int_msk = 0x1C,
+ .msg = "rocee qmm ovf: qpc invalid err"
+ }, {
+ .int_msk = 0x1D,
+ .msg = "rocee qmm ovf: qpc ovf err"
+ }, {
+ .int_msk = 0x1E,
+ .msg = "rocee qmm ovf: qpc hopnum err"
+ }, {
+ .int_msk = 0x1F,
+ .msg = "rocee qmm ovf: qpc ba0 err"
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
+ {
+ .module_id = MODULE_NONE,
+ .msg = "MODULE_NONE"
+ }, {
+ .module_id = MODULE_BIOS_COMMON,
+ .msg = "MODULE_BIOS_COMMON"
+ }, {
+ .module_id = MODULE_GE,
+ .msg = "MODULE_GE"
+ }, {
+ .module_id = MODULE_IGU_EGU,
+ .msg = "MODULE_IGU_EGU"
+ }, {
+ .module_id = MODULE_LGE,
+ .msg = "MODULE_LGE"
+ }, {
+ .module_id = MODULE_NCSI,
+ .msg = "MODULE_NCSI"
+ }, {
+ .module_id = MODULE_PPP,
+ .msg = "MODULE_PPP"
+ }, {
+ .module_id = MODULE_QCN,
+ .msg = "MODULE_QCN"
+ }, {
+ .module_id = MODULE_RCB_RX,
+ .msg = "MODULE_RCB_RX"
+ }, {
+ .module_id = MODULE_RTC,
+ .msg = "MODULE_RTC"
+ }, {
+ .module_id = MODULE_SSU,
+ .msg = "MODULE_SSU"
+ }, {
+ .module_id = MODULE_TM,
+ .msg = "MODULE_TM"
+ }, {
+ .module_id = MODULE_RCB_TX,
+ .msg = "MODULE_RCB_TX"
+ }, {
+ .module_id = MODULE_TXDMA,
+ .msg = "MODULE_TXDMA"
+ }, {
+ .module_id = MODULE_MASTER,
+ .msg = "MODULE_MASTER"
+ }, {
+ .module_id = MODULE_HIMAC,
+ .msg = "MODULE_HIMAC"
+ }, {
+ .module_id = MODULE_ROCEE_TOP,
+ .msg = "MODULE_ROCEE_TOP"
+ }, {
+ .module_id = MODULE_ROCEE_TIMER,
+ .msg = "MODULE_ROCEE_TIMER"
+ }, {
+ .module_id = MODULE_ROCEE_MDB,
+ .msg = "MODULE_ROCEE_MDB"
+ }, {
+ .module_id = MODULE_ROCEE_TSP,
+ .msg = "MODULE_ROCEE_TSP"
+ }, {
+ .module_id = MODULE_ROCEE_TRP,
+ .msg = "MODULE_ROCEE_TRP"
+ }, {
+ .module_id = MODULE_ROCEE_SCC,
+ .msg = "MODULE_ROCEE_SCC"
+ }, {
+ .module_id = MODULE_ROCEE_CAEP,
+ .msg = "MODULE_ROCEE_CAEP"
+ }, {
+ .module_id = MODULE_ROCEE_GEN_AC,
+ .msg = "MODULE_ROCEE_GEN_AC"
+ }, {
+ .module_id = MODULE_ROCEE_QMM,
+ .msg = "MODULE_ROCEE_QMM"
+ }, {
+ .module_id = MODULE_ROCEE_LSAN,
+ .msg = "MODULE_ROCEE_LSAN"
+ }
+};
+
+static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
+ {
+ .type_id = NONE_ERROR,
+ .msg = "none_error"
+ }, {
+ .type_id = FIFO_ERROR,
+ .msg = "fifo_error"
+ }, {
+ .type_id = MEMORY_ERROR,
+ .msg = "memory_error"
+ }, {
+ .type_id = POISON_ERROR,
+ .msg = "poison_error"
+ }, {
+ .type_id = MSIX_ECC_ERROR,
+ .msg = "msix_ecc_error"
+ }, {
+ .type_id = TQP_INT_ECC_ERROR,
+ .msg = "tqp_int_ecc_error"
+ }, {
+ .type_id = PF_ABNORMAL_INT_ERROR,
+ .msg = "pf_abnormal_int_error"
+ }, {
+ .type_id = MPF_ABNORMAL_INT_ERROR,
+ .msg = "mpf_abnormal_int_error"
+ }, {
+ .type_id = COMMON_ERROR,
+ .msg = "common_error"
+ }, {
+ .type_id = PORT_ERROR,
+ .msg = "port_error"
+ }, {
+ .type_id = ETS_ERROR,
+ .msg = "ets_error"
+ }, {
+ .type_id = NCSI_ERROR,
+ .msg = "ncsi_error"
+ }, {
+ .type_id = GLB_ERROR,
+ .msg = "glb_error"
+ }, {
+ .type_id = LINK_ERROR,
+ .msg = "link_error"
+ }, {
+ .type_id = PTP_ERROR,
+ .msg = "ptp_error"
+ }, {
+ .type_id = ROCEE_NORMAL_ERR,
+ .msg = "rocee_normal_error"
+ }, {
+ .type_id = ROCEE_OVF_ERR,
+ .msg = "rocee_ovf_error"
+ }, {
+ .type_id = ROCEE_BUS_ERR,
+ .msg = "rocee_bus_error"
+ },
};
static void hclge_log_error(struct device *dev, char *reg,
@@ -694,7 +1399,7 @@ static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
if (en) {
@@ -729,7 +1434,7 @@ static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
struct hclge_desc desc;
int ret;
- if (hdev->pdev->revision < 0x21)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0;
/* configure NCSI error interrupts */
@@ -753,8 +1458,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure IGU,EGU error interrupts */
hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
if (en)
- desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+ desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
@@ -792,7 +1498,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
/* configure PPP error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
@@ -808,7 +1514,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
desc[1].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[1].data[2] =
cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
@@ -865,15 +1571,12 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
}
/* configure TM QCN hw errors */
- ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0);
- if (ret) {
- dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
- return ret;
- }
-
- hclge_cmd_reuse_desc(&desc, false);
- if (en)
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
+ desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+ if (en) {
+ desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -930,7 +1633,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
desc[0].data[0] =
@@ -1015,7 +1718,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU ecc error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
if (en) {
desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
@@ -1037,11 +1740,11 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) {
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[0].data[0] =
cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
else
@@ -1073,7 +1776,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
 * This function queries the number of mpf and pf buffer descriptors.
*/
static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
- int *mpf_bd_num, int *pf_bd_num)
+ u32 *mpf_bd_num, u32 *pf_bd_num)
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_min_bd_num, pf_min_bd_num;
@@ -1102,7 +1805,7 @@ static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
*mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
*pf_bd_num = le32_to_cpu(desc_bd.data[1]);
if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
- dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
+ dev_err(dev, "Invalid bd num: mpf(%u), pf(%u)\n",
*mpf_bd_num, *pf_bd_num);
return -EINVAL;
}
@@ -1260,7 +1963,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
&ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -1274,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures
*
* This function handles all the PF RAS errors in the
- * hw register/s using command.
+ * hw registers using command.
*/
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
@@ -1333,7 +2036,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
}
/* clear all PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -1384,8 +2087,8 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
true);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
if (ret) {
@@ -1416,7 +2119,7 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT);
+ HCLGE_COMM_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
@@ -1497,7 +2200,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
status = le32_to_cpu(desc[0].data[0]);
-
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
dev_err(dev, "ROCEE RAS AXI rresp error\n");
@@ -1507,6 +2209,8 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
reset_type = HNAE3_FUNC_RESET;
+ hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR);
+
ret = hclge_log_rocee_axi_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
@@ -1531,7 +2235,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
/* clear error status */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
@@ -1548,7 +2252,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
struct hclge_desc desc;
int ret;
- if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !hnae3_dev_roce_supported(hdev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
@@ -1574,8 +2279,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
enum hnae3_reset_type reset_type;
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
- hdev->pdev->revision < 0x21)
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
@@ -1585,41 +2289,59 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
static const struct hclge_hw_blk hw_blk[] = {
{
- .msk = BIT(0), .name = "IGU_EGU",
- .config_err_int = hclge_config_igu_egu_hw_err_int,
- },
- {
- .msk = BIT(1), .name = "PPP",
- .config_err_int = hclge_config_ppp_hw_err_int,
- },
- {
- .msk = BIT(2), .name = "SSU",
- .config_err_int = hclge_config_ssu_hw_err_int,
- },
- {
- .msk = BIT(3), .name = "PPU",
- .config_err_int = hclge_config_ppu_hw_err_int,
- },
- {
- .msk = BIT(4), .name = "TM",
- .config_err_int = hclge_config_tm_hw_err_int,
- },
- {
- .msk = BIT(5), .name = "COMMON",
- .config_err_int = hclge_config_common_hw_err_int,
- },
- {
- .msk = BIT(8), .name = "MAC",
- .config_err_int = hclge_config_mac_err_int,
- },
- { /* sentinel */ }
+ .msk = BIT(0),
+ .name = "IGU_EGU",
+ .config_err_int = hclge_config_igu_egu_hw_err_int,
+ }, {
+ .msk = BIT(1),
+ .name = "PPP",
+ .config_err_int = hclge_config_ppp_hw_err_int,
+ }, {
+ .msk = BIT(2),
+ .name = "SSU",
+ .config_err_int = hclge_config_ssu_hw_err_int,
+ }, {
+ .msk = BIT(3),
+ .name = "PPU",
+ .config_err_int = hclge_config_ppu_hw_err_int,
+ }, {
+ .msk = BIT(4),
+ .name = "TM",
+ .config_err_int = hclge_config_tm_hw_err_int,
+ }, {
+ .msk = BIT(5),
+ .name = "COMMON",
+ .config_err_int = hclge_config_common_hw_err_int,
+ }, {
+ .msk = BIT(8),
+ .name = "MAC",
+ .config_err_int = hclge_config_mac_err_int,
+ }, {
+ /* sentinel */
+ }
};
+static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+
+ if (enable)
+ reg_val |= BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+ else
+ reg_val &= ~BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+}
+
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
{
const struct hclge_hw_blk *module = hw_blk;
int ret = 0;
+ hclge_config_all_msix_error(hdev, state);
+
while (module->name) {
if (module->config_err_int) {
ret = module->config_err_int(hdev, state);
@@ -1645,7 +2367,6 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
}
status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
-
if (status & HCLGE_RAS_REG_NFE_MASK ||
status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
ae_dev->hw_err_reset_req = 0;
@@ -1661,15 +2382,12 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
}
/* Handling Non-fatal Rocee RAS errors */
- if (hdev->pdev->revision >= 0x21 &&
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- goto out;
-
if (ae_dev->hw_err_reset_req)
return PCI_ERS_RESULT_NEED_RESET;
@@ -1687,7 +2405,8 @@ static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
else
desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
- desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+ desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
}
@@ -1742,12 +2461,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%u)\n", vf_id);
+ dev_err(dev, "invalid vport(%u)\n", vf_id);
return;
}
@@ -1760,8 +2479,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_err(dev, "inform reset to vf(%u) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+ vf_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
@@ -1884,11 +2603,8 @@ static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
- struct hclge_mac_tnl_stats mac_tnl_stats;
- struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc *desc;
- u32 status;
int ret;
/* query the number of bds for the MSIx int status */
@@ -1911,16 +2627,45 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
if (ret)
goto msi_error;
+ ret = hclge_handle_mac_tnl(hdev);
+
+msi_error:
+ kfree(desc);
+out:
+ return ret;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "failed to handle msix error during dev init\n");
+ return -EAGAIN;
+ }
+
+ return hclge_handle_all_hw_msix_error(hdev, reset_requests);
+}
+
+int hclge_handle_mac_tnl(struct hclge_dev *hdev)
+{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ u32 status;
+ int ret;
+
/* query and clear mac tnl interruptions */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_TNL_INT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
- goto msi_error;
+ dev_err(dev, "failed to query mac tnl int, ret = %d.\n", ret);
+ return ret;
}
- status = le32_to_cpu(desc->data[0]);
+ status = le32_to_cpu(desc.data[0]);
if (status) {
/* When mac tnl interrupt occurs, we record current time and
* register status here in a fifo, then clear the status. So
@@ -1932,33 +2677,15 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
ret = hclge_clear_mac_tnl_int(hdev);
if (ret)
- dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
+ dev_err(dev, "failed to clear mac tnl int, ret = %d.\n",
+ ret);
}
-msi_error:
- kfree(desc);
-out:
return ret;
}
-int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
- unsigned long *reset_requests)
-{
- struct device *dev = &hdev->pdev->dev;
-
- if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
- dev_err(dev,
- "Can't handle - MSIx error reported during dev init\n");
- return 0;
- }
-
- return hclge_handle_all_hw_msix_error(hdev, reset_requests);
-}
-
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGE_DESC_NO_DATA_LEN 8
-
struct hclge_dev *hdev = ae_dev->priv;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
@@ -2007,3 +2734,207 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
msi_error:
kfree(desc);
}
+
+bool hclge_find_error_source(struct hclge_dev *hdev)
+{
+ u32 msix_src_flag, hw_err_src_flag;
+
+ msix_src_flag = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_VECTOR0_REG_MSIX_MASK;
+
+ hw_err_src_flag = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG) &
+ HCLGE_RAS_REG_ERR_MASK;
+
+ return msix_src_flag || hw_err_src_flag;
+}
+
+void hclge_handle_occurred_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ if (hclge_find_error_source(hdev))
+ hclge_handle_error_info_log(ae_dev);
+}
+
+static void
+hclge_handle_error_type_reg_log(struct device *dev,
+ struct hclge_mod_err_info *mod_info,
+ struct hclge_type_reg_err_info *type_reg_info)
+{
+#define HCLGE_ERR_TYPE_MASK 0x7F
+#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
+
+ u8 mod_id, total_module, type_id, total_type, i, is_ras;
+ u8 index_module = MODULE_NONE;
+ u8 index_type = NONE_ERROR;
+
+ mod_id = mod_info->mod_id;
+ type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK;
+ is_ras = type_reg_info->type_id >> HCLGE_ERR_TYPE_IS_RAS_OFFSET;
+
+ total_module = ARRAY_SIZE(hclge_hw_module_id_st);
+ total_type = ARRAY_SIZE(hclge_hw_type_id_st);
+
+ for (i = 0; i < total_module; i++) {
+ if (mod_id == hclge_hw_module_id_st[i].module_id) {
+ index_module = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < total_type; i++) {
+ if (type_id == hclge_hw_type_id_st[i].type_id) {
+ index_type = i;
+ break;
+ }
+ }
+
+ if (index_module != MODULE_NONE && index_type != NONE_ERROR)
+ dev_err(dev,
+ "found %s %s, is %s error.\n",
+ hclge_hw_module_id_st[index_module].msg,
+ hclge_hw_type_id_st[index_type].msg,
+ is_ras ? "ras" : "msix");
+ else
+ dev_err(dev,
+ "unknown module[%u] or type[%u].\n", mod_id, type_id);
+
+ dev_err(dev, "reg_value:\n");
+ for (i = 0; i < type_reg_info->reg_num; i++)
+ dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+}
+
+static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
+ const u32 *buf, u32 buf_size)
+{
+ struct hclge_type_reg_err_info *type_reg_info;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_mod_err_info *mod_info;
+ struct hclge_sum_err_info *sum_info;
+ u8 mod_num, err_num, i;
+ u32 offset = 0;
+
+ sum_info = (struct hclge_sum_err_info *)&buf[offset++];
+ if (sum_info->reset_type &&
+ sum_info->reset_type != HNAE3_NONE_RESET)
+ set_bit(sum_info->reset_type, &ae_dev->hw_err_reset_req);
+ mod_num = sum_info->mod_num;
+
+ while (mod_num--) {
+ if (offset >= buf_size) {
+ dev_err(dev, "The offset(%u) exceeds buf's size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+ mod_info = (struct hclge_mod_err_info *)&buf[offset++];
+ err_num = mod_info->err_num;
+
+ for (i = 0; i < err_num; i++) {
+ if (offset >= buf_size) {
+ dev_err(dev,
+ "The offset(%u) exceeds buf size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+
+ type_reg_info = (struct hclge_type_reg_err_info *)
+ &buf[offset++];
+ hclge_handle_error_type_reg_log(dev, mod_info,
+ type_reg_info);
+
+ offset += type_reg_info->reg_num;
+ }
+ }
+}
+
+static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_ALL_ERR_BD_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "failed to query error bd_num, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *bd_num = le32_to_cpu(desc_bd.data[0]);
+ if (!(*bd_num)) {
+ dev_err(dev, "The value of bd_num is 0!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_query_all_err_info(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_QUERY_ALL_ERR_INFO, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(dev, "failed to query error info, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev)
+{
+ u32 bd_num, desc_len, buf_len, buf_size, i;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_desc *desc;
+ __le32 *desc_data;
+ u32 *buf;
+ int ret;
+
+ ret = hclge_query_all_err_bd_num(hdev, &bd_num);
+ if (ret)
+ goto out;
+
+ desc_len = bd_num * sizeof(struct hclge_desc);
+ desc = kzalloc(desc_len, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hclge_query_all_err_info(hdev, desc, bd_num);
+ if (ret)
+ goto err_desc;
+
+ buf_len = bd_num * sizeof(struct hclge_desc) - HCLGE_DESC_NO_DATA_LEN;
+ buf_size = buf_len / sizeof(u32);
+
+ desc_data = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_desc;
+ }
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_buf_alloc;
+ }
+
+ memcpy(desc_data, &desc[0].data[0], buf_len);
+ for (i = 0; i < buf_size; i++)
+ buf[i] = le32_to_cpu(desc_data[i]);
+
+ hclge_handle_error_module_log(ae_dev, buf, buf_size);
+ kfree(buf);
+
+err_buf_alloc:
+ kfree(desc_data);
+err_desc:
+ kfree(desc);
+out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 876fd81ad2f1..86be6fb32990 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -15,8 +15,9 @@
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
+#define HCLGE_RAS_REG_ERR_MASK \
+ (HCLGE_RAS_REG_NFE_MASK | HCLGE_RAS_REG_ROCEE_ERR_MASK)
-#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800
#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
@@ -33,7 +34,8 @@
#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000
#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
-#define HCLGE_IGU_ERR_INT_EN 0x0000066F
+#define HCLGE_IGU_ERR_INT_EN 0x0000000F
+#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F
@@ -48,6 +50,8 @@
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
@@ -107,6 +111,10 @@
#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F
+#define HCLGE_DESC_DATA_MAX 8
+#define HCLGE_REG_NUM_MAX 256
+#define HCLGE_DESC_NO_DATA_LEN 8
+
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
HCLGE_ERR_INT_RAS_CE = 1,
@@ -114,6 +122,60 @@ enum hclge_err_int_type {
HCLGE_ERR_INT_RAS_FE = 3,
};
+enum hclge_mod_name_list {
+ MODULE_NONE = 0,
+ MODULE_BIOS_COMMON = 1,
+ MODULE_GE = 2,
+ MODULE_IGU_EGU = 3,
+ MODULE_LGE = 4,
+ MODULE_NCSI = 5,
+ MODULE_PPP = 6,
+ MODULE_QCN = 7,
+ MODULE_RCB_RX = 8,
+ MODULE_RTC = 9,
+ MODULE_SSU = 10,
+ MODULE_TM = 11,
+ MODULE_RCB_TX = 12,
+ MODULE_TXDMA = 13,
+ MODULE_MASTER = 14,
+ MODULE_HIMAC = 15,
+ /* add new MODULE NAME for NIC here in order */
+ MODULE_ROCEE_TOP = 40,
+ MODULE_ROCEE_TIMER = 41,
+ MODULE_ROCEE_MDB = 42,
+ MODULE_ROCEE_TSP = 43,
+ MODULE_ROCEE_TRP = 44,
+ MODULE_ROCEE_SCC = 45,
+ MODULE_ROCEE_CAEP = 46,
+ MODULE_ROCEE_GEN_AC = 47,
+ MODULE_ROCEE_QMM = 48,
+ MODULE_ROCEE_LSAN = 49,
+ /* add new MODULE NAME for RoCEE here in order */
+};
+
+enum hclge_err_type_list {
+ NONE_ERROR = 0,
+ FIFO_ERROR = 1,
+ MEMORY_ERROR = 2,
+ POISON_ERROR = 3,
+ MSIX_ECC_ERROR = 4,
+ TQP_INT_ECC_ERROR = 5,
+ PF_ABNORMAL_INT_ERROR = 6,
+ MPF_ABNORMAL_INT_ERROR = 7,
+ COMMON_ERROR = 8,
+ PORT_ERROR = 9,
+ ETS_ERROR = 10,
+ NCSI_ERROR = 11,
+ GLB_ERROR = 12,
+ LINK_ERROR = 13,
+ PTP_ERROR = 14,
+ /* add new ERROR TYPE for NIC here in order */
+ ROCEE_NORMAL_ERR = 40,
+ ROCEE_OVF_ERR = 41,
+ ROCEE_BUS_ERR = 42,
+ /* add new ERROR TYPE for ROCEE here in order */
+};
+
struct hclge_hw_blk {
u32 msk;
const char *name;
@@ -126,11 +188,44 @@ struct hclge_hw_error {
enum hnae3_reset_type reset_level;
};
+struct hclge_hw_module_id {
+ enum hclge_mod_name_list module_id;
+ const char *msg;
+};
+
+struct hclge_hw_type_id {
+ enum hclge_err_type_list type_id;
+ const char *msg;
+};
+
+struct hclge_sum_err_info {
+ u8 reset_type;
+ u8 mod_num;
+ u8 rsv[2];
+};
+
+struct hclge_mod_err_info {
+ u8 mod_id;
+ u8 err_num;
+ u8 rsv[2];
+};
+
+struct hclge_type_reg_err_info {
+ u8 type_id;
+ u8 reg_num;
+ u8 rsv[2];
+ u32 hclge_reg[HCLGE_REG_NUM_MAX];
+};
+
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
+bool hclge_find_error_source(struct hclge_dev *hdev);
+void hclge_handle_occurred_error(struct hclge_dev *hdev);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_mac_tnl(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d3b0cd74ecd2..987271da6e9b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
+#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -22,10 +23,10 @@
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
+#include "hclge_devlink.h"
+#include "hclge_comm_cmd.h"
#define HCLGE_NAME "hclge"
-#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
-#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
@@ -55,21 +56,23 @@
#define HCLGE_LINK_STATUS_MS 10
-#define HCLGE_VF_VPORT_START_NUM 1
-
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
-static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
-static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
+static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
+
static struct hnae3_ae_algo ae_algo;
static struct workqueue_struct *hclge_wq;
@@ -82,29 +85,30 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
- HCLGE_CMDQ_TX_ADDR_H_REG,
- HCLGE_CMDQ_TX_DEPTH_REG,
- HCLGE_CMDQ_TX_TAIL_REG,
- HCLGE_CMDQ_TX_HEAD_REG,
- HCLGE_CMDQ_RX_ADDR_L_REG,
- HCLGE_CMDQ_RX_ADDR_H_REG,
- HCLGE_CMDQ_RX_DEPTH_REG,
- HCLGE_CMDQ_RX_TAIL_REG,
- HCLGE_CMDQ_RX_HEAD_REG,
- HCLGE_VECTOR0_CMDQ_SRC_REG,
- HCLGE_CMDQ_INTR_STS_REG,
- HCLGE_CMDQ_INTR_EN_REG,
- HCLGE_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
- HCLGE_VECTOR0_OTER_EN_REG,
+ HCLGE_PF_OTHER_INT_REG,
HCLGE_MISC_RESET_STS_REG,
HCLGE_MISC_VECTOR_INT_STS,
HCLGE_GLOBAL_RESET_REG,
@@ -145,181 +149,218 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
- {"mac_tx_mac_pause_num",
+ {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
- {"mac_rx_mac_pause_num",
+ {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
- {"mac_tx_control_pkt_num",
+ {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
+ {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
+ {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
- {"mac_rx_control_pkt_num",
+ {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
- {"mac_tx_pfc_pkt_num",
+ {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
- {"mac_tx_pfc_pri0_pkt_num",
+ {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
- {"mac_tx_pfc_pri1_pkt_num",
+ {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
- {"mac_tx_pfc_pri2_pkt_num",
+ {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
- {"mac_tx_pfc_pri3_pkt_num",
+ {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
- {"mac_tx_pfc_pri4_pkt_num",
+ {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
- {"mac_tx_pfc_pri5_pkt_num",
+ {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
- {"mac_tx_pfc_pri6_pkt_num",
+ {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
- {"mac_tx_pfc_pri7_pkt_num",
+ {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
- {"mac_rx_pfc_pkt_num",
+ {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
+ {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
+ {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
+ {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
+ {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
+ {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
+ {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
+ {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
+ {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
- {"mac_rx_pfc_pri0_pkt_num",
+ {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
- {"mac_rx_pfc_pri1_pkt_num",
+ {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
- {"mac_rx_pfc_pri2_pkt_num",
+ {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
- {"mac_rx_pfc_pri3_pkt_num",
+ {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
- {"mac_rx_pfc_pri4_pkt_num",
+ {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
- {"mac_rx_pfc_pri5_pkt_num",
+ {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
- {"mac_rx_pfc_pri6_pkt_num",
+ {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
- {"mac_rx_pfc_pri7_pkt_num",
+ {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
- {"mac_tx_total_pkt_num",
+ {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
+ {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
+ {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
+ {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
+ {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
+ {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
+ {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
+ {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
+ {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
- {"mac_tx_total_oct_num",
+ {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
- {"mac_tx_good_pkt_num",
+ {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
- {"mac_tx_bad_pkt_num",
+ {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
- {"mac_tx_good_oct_num",
+ {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
- {"mac_tx_bad_oct_num",
+ {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
- {"mac_tx_uni_pkt_num",
+ {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
- {"mac_tx_multi_pkt_num",
+ {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
- {"mac_tx_broad_pkt_num",
+ {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
- {"mac_tx_undersize_pkt_num",
+ {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_oversize_pkt_num",
+ {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
- {"mac_tx_64_oct_pkt_num",
+ {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
- {"mac_tx_65_127_oct_pkt_num",
+ {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
- {"mac_tx_128_255_oct_pkt_num",
+ {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
- {"mac_tx_256_511_oct_pkt_num",
+ {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
- {"mac_tx_512_1023_oct_pkt_num",
+ {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
- {"mac_tx_1024_1518_oct_pkt_num",
+ {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_2047_oct_pkt_num",
+ {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
- {"mac_tx_2048_4095_oct_pkt_num",
+ {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
- {"mac_tx_4096_8191_oct_pkt_num",
+ {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_9216_oct_pkt_num",
+ {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
- {"mac_tx_9217_12287_oct_pkt_num",
+ {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
- {"mac_tx_12288_16383_oct_pkt_num",
+ {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
- {"mac_tx_1519_max_good_pkt_num",
+ {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
- {"mac_tx_1519_max_bad_pkt_num",
+ {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
- {"mac_rx_total_pkt_num",
+ {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
- {"mac_rx_total_oct_num",
+ {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
- {"mac_rx_good_pkt_num",
+ {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
- {"mac_rx_bad_pkt_num",
+ {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
- {"mac_rx_good_oct_num",
+ {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
- {"mac_rx_bad_oct_num",
+ {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
- {"mac_rx_uni_pkt_num",
+ {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
- {"mac_rx_multi_pkt_num",
+ {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
- {"mac_rx_broad_pkt_num",
+ {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
- {"mac_rx_undersize_pkt_num",
+ {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_oversize_pkt_num",
+ {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
- {"mac_rx_64_oct_pkt_num",
+ {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
- {"mac_rx_65_127_oct_pkt_num",
+ {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
- {"mac_rx_128_255_oct_pkt_num",
+ {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
- {"mac_rx_256_511_oct_pkt_num",
+ {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
- {"mac_rx_512_1023_oct_pkt_num",
+ {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
- {"mac_rx_1024_1518_oct_pkt_num",
+ {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_2047_oct_pkt_num",
+ {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
- {"mac_rx_2048_4095_oct_pkt_num",
+ {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
- {"mac_rx_4096_8191_oct_pkt_num",
+ {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_9216_oct_pkt_num",
+ {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
- {"mac_rx_9217_12287_oct_pkt_num",
+ {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
- {"mac_rx_12288_16383_oct_pkt_num",
+ {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
- {"mac_rx_1519_max_good_pkt_num",
+ {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
- {"mac_rx_1519_max_bad_pkt_num",
+ {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
- {"mac_tx_fragment_pkt_num",
+ {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
- {"mac_tx_undermin_pkt_num",
+ {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
- {"mac_tx_jabber_pkt_num",
+ {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
- {"mac_tx_err_all_pkt_num",
+ {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
- {"mac_tx_from_app_good_pkt_num",
+ {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
- {"mac_tx_from_app_bad_pkt_num",
+ {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
- {"mac_rx_fragment_pkt_num",
+ {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
- {"mac_rx_undermin_pkt_num",
+ {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
- {"mac_rx_jabber_pkt_num",
+ {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
- {"mac_rx_fcs_err_pkt_num",
+ {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
- {"mac_rx_send_app_good_pkt_num",
+ {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
- {"mac_rx_send_app_bad_pkt_num",
+ {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
@@ -332,14 +373,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u8 hclge_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
static const u32 hclge_dfx_bd_offset_list[] = {
HCLGE_DFX_BIOS_BD_OFFSET,
HCLGE_DFX_SSU_0_BD_OFFSET,
@@ -371,49 +404,89 @@ static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
};
static const struct key_info meta_data_key_info[] = {
- { PACKET_TYPE_ID, 6},
- { IP_FRAGEMENT, 1},
- { ROCE_TYPE, 1},
- { NEXT_KEY, 5},
- { VLAN_NUMBER, 2},
- { SRC_VPORT, 12},
- { DST_VPORT, 12},
- { TUNNEL_PACKET, 1},
+ { PACKET_TYPE_ID, 6 },
+ { IP_FRAGEMENT, 1 },
+ { ROCE_TYPE, 1 },
+ { NEXT_KEY, 5 },
+ { VLAN_NUMBER, 2 },
+ { SRC_VPORT, 12 },
+ { DST_VPORT, 12 },
+ { TUNNEL_PACKET, 1 },
};
static const struct key_info tuple_key_info[] = {
- { OUTER_DST_MAC, 48},
- { OUTER_SRC_MAC, 48},
- { OUTER_VLAN_TAG_FST, 16},
- { OUTER_VLAN_TAG_SEC, 16},
- { OUTER_ETH_TYPE, 16},
- { OUTER_L2_RSV, 16},
- { OUTER_IP_TOS, 8},
- { OUTER_IP_PROTO, 8},
- { OUTER_SRC_IP, 32},
- { OUTER_DST_IP, 32},
- { OUTER_L3_RSV, 16},
- { OUTER_SRC_PORT, 16},
- { OUTER_DST_PORT, 16},
- { OUTER_L4_RSV, 32},
- { OUTER_TUN_VNI, 24},
- { OUTER_TUN_FLOW_ID, 8},
- { INNER_DST_MAC, 48},
- { INNER_SRC_MAC, 48},
- { INNER_VLAN_TAG_FST, 16},
- { INNER_VLAN_TAG_SEC, 16},
- { INNER_ETH_TYPE, 16},
- { INNER_L2_RSV, 16},
- { INNER_IP_TOS, 8},
- { INNER_IP_PROTO, 8},
- { INNER_SRC_IP, 32},
- { INNER_DST_IP, 32},
- { INNER_L3_RSV, 16},
- { INNER_SRC_PORT, 16},
- { INNER_DST_PORT, 16},
- { INNER_L4_RSV, 32},
+ { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
+ { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
+ { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
+ { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
+ { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
+ { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
+ { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
+ { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
+ { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
+ { INNER_DST_MAC, 48, KEY_OPT_MAC,
+ offsetof(struct hclge_fd_rule, tuples.dst_mac),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
+ { INNER_SRC_MAC, 48, KEY_OPT_MAC,
+ offsetof(struct hclge_fd_rule, tuples.src_mac),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
+ { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
+ offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
+ { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+ { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.ether_proto),
+ offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
+ { INNER_L2_RSV, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.l2_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
+ { INNER_IP_TOS, 8, KEY_OPT_U8,
+ offsetof(struct hclge_fd_rule, tuples.ip_tos),
+ offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
+ { INNER_IP_PROTO, 8, KEY_OPT_U8,
+ offsetof(struct hclge_fd_rule, tuples.ip_proto),
+ offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
+ { INNER_SRC_IP, 32, KEY_OPT_IP,
+ offsetof(struct hclge_fd_rule, tuples.src_ip),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
+ { INNER_DST_IP, 32, KEY_OPT_IP,
+ offsetof(struct hclge_fd_rule, tuples.dst_ip),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
+ { INNER_L3_RSV, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.l3_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
+ { INNER_SRC_PORT, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.src_port),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
+ { INNER_DST_PORT, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.dst_port),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
+ { INNER_L4_RSV, 32, KEY_OPT_LE32,
+ offsetof(struct hclge_fd_rule, tuples.l4_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
+/**
+ * hclge_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue; it
+ * sends the descriptors, cleans the queue, etc.
+ **/
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
+{
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
+}
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -421,8 +494,9 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
- int i, k, n;
+ u32 data_size;
int ret;
+ u32 i;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
@@ -433,33 +507,37 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
return ret;
}
- for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
- /* for special opcode 0032, only the first desc has the head */
- if (unlikely(i == 0)) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */
+ data_size = sizeof(desc) / (sizeof(u64)) - 1;
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is continuous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
return 0;
}
-static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
+#define HCLGE_REG_NUM_PER_DESC 4
+
+ u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
- u16 i, k, n;
+ u32 data_size;
+ u32 desc_num;
int ret;
+ u32 i;
+
+ /* The first desc has a 64-bit header, so it needs to be taken into account */
+ desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
* so GFP_ATOMIC is more suitalbe here
@@ -475,21 +553,16 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
return ret;
}
- for (i = 0; i < desc_num; i++) {
- /* for special opcode 0034, only the first desc has the head */
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is continuous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
kfree(desc);
@@ -497,164 +570,82 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
return 0;
}
-static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
struct hclge_desc desc;
- __le32 *desc_data;
- u32 reg_num;
int ret;
+ /* The driver needs the total number of registers, including both
+ * valid and reserved registers, but on V2 devices the old firmware
+ * only returns the number of valid registers. To stay compatible
+ * with these devices, the driver uses a fixed value.
+ */
+ if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
+ return 0;
+ }
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query mac statistic reg number, ret = %d\n",
+ ret);
return ret;
-
- desc_data = (__le32 *)(&desc.data[0]);
- reg_num = le32_to_cpu(*desc_data);
-
- *desc_num = 1 + ((reg_num - 3) >> 2) +
- (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
-
- return 0;
-}
-
-static int hclge_mac_update_stats(struct hclge_dev *hdev)
-{
- u32 desc_num;
- int ret;
-
- ret = hclge_mac_query_reg_num(hdev, &desc_num);
-
- /* The firmware supports the new statistics acquisition method */
- if (!ret)
- ret = hclge_mac_update_stats_complete(hdev, desc_num);
- else if (ret == -EOPNOTSUPP)
- ret = hclge_mac_update_stats_defective(hdev);
- else
- dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
-
- return ret;
-}
-
-static int hclge_tqps_update_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hnae3_queue *queue;
- struct hclge_desc desc[1];
- struct hclge_tqp *tqp;
- int ret, i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
- true);
-
- desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
}
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATUS,
- true);
-
- desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
+ *reg_num = le32_to_cpu(desc.data[0]);
+ if (*reg_num == 0) {
+ dev_err(&hdev->pdev->dev,
+ "mac statistic reg number is invalid!\n");
+ return -ENODATA;
}
return 0;
}
-static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+int hclge_mac_update_stats(struct hclge_dev *hdev)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
-}
-
-static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-
- /* each tqp has TX & RX two queues */
- return kinfo->num_tqps * (2);
+ /* The firmware supports the new statistics acquisition method */
+ if (hdev->ae_dev->dev_specs.mac_stats_num)
+ return hclge_mac_update_stats_complete(hdev);
+ else
+ return hclge_mac_update_stats_defective(hdev);
}
-static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+static int hclge_comm_get_count(struct hclge_dev *hdev,
+ const struct hclge_comm_stats_str strs[],
+ u32 size)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i = 0;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
+ int count = 0;
+ u32 i;
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
+ for (i = 0; i < size; i++)
+ if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
+ count++;
- return buff;
+ return count;
}
-static u64 *hclge_comm_get_stats(const void *comm_stats,
+static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
u64 *buf = data;
u32 i;
- for (i = 0; i < size; i++)
- buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
+ for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
+ *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
+ buf++;
+ }
- return buf + size;
+ return buf;
}
-static u8 *hclge_comm_get_strings(u32 stringset,
+static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 *data)
{
@@ -665,6 +656,9 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return buff;
for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
@@ -679,7 +673,7 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
if (handle->client) {
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status) {
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -687,6 +681,8 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -709,7 +705,7 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -720,10 +716,11 @@ static void hclge_update_stats(struct hnae3_handle *handle,
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
-#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
- HNAE3_SUPPORT_PHY_LOOPBACK |\
- HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
+ HNAE3_SUPPORT_PHY_LOOPBACK | \
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -737,7 +734,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
if (stringset == ETH_SS_TEST) {
/* clear loopback bit flags at first */
handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
- if (hdev->pdev->revision >= 0x21 ||
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
@@ -745,18 +742,23 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
- if (hdev->hw.mac.phydev) {
+ if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) ||
+ hnae3_dev_phy_imp_supported(hdev)) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
-
} else if (stringset == ETH_SS_STATS) {
- count = ARRAY_SIZE(g_mac_stats_string) +
- hclge_tqps_get_sset_count(handle, stringset);
+ count = hclge_comm_get_count(hdev, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string)) +
+ hclge_comm_tqps_get_sset_count(handle);
}
return count;
@@ -765,15 +767,22 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
u8 *p = (char *)data;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
- p = hclge_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
@@ -804,9 +813,9 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
- p = hclge_tqps_get_stats(handle, p);
+ p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
@@ -824,6 +833,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
+#define HCLGE_MAC_ID_MASK 0xF
+
if (!(status->pf_state & HCLGE_PF_STATE_DONE))
return -EINVAL;
@@ -833,6 +844,7 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
else
hdev->flag &= ~HCLGE_FLAG_MAIN;
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
return 0;
}
@@ -880,7 +892,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
}
req = (struct hclge_pf_res_cmd *)desc.data;
- hdev->num_tqps = le16_to_cpu(req->tqp_num);
+ hdev->num_tqps = le16_to_cpu(req->tqp_num) +
+ le16_to_cpu(req->ext_tqp_num);
hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (req->tx_buf_size)
@@ -899,67 +912,59 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+ hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "only %u msi resources available, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
+ }
+
if (hnae3_dev_roce_supported(hdev)) {
- hdev->roce_base_msix_offset =
- hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
- HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
-
- /* nic's msix numbers is always equals to the roce's. */
- hdev->num_nic_msi = hdev->num_roce_msi;
+ le16_to_cpu(req->pf_intr_vector_number_roce);
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msi +
- hdev->roce_base_msix_offset;
+ hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
} else {
- hdev->num_msi =
- hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
-
- hdev->num_nic_msi = hdev->num_msi;
- }
-
- if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
- dev_err(&hdev->pdev->dev,
- "Just %u msi resources, not enough for pf(min:2).\n",
- hdev->num_nic_msi);
- return -EINVAL;
+ hdev->num_msi = hdev->num_nic_msi;
}
return 0;
}
-static int hclge_parse_speed(int speed_cmd, int *speed)
+static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
switch (speed_cmd) {
- case 6:
+ case HCLGE_FW_MAC_SPEED_10M:
*speed = HCLGE_MAC_SPEED_10M;
break;
- case 7:
+ case HCLGE_FW_MAC_SPEED_100M:
*speed = HCLGE_MAC_SPEED_100M;
break;
- case 0:
+ case HCLGE_FW_MAC_SPEED_1G:
*speed = HCLGE_MAC_SPEED_1G;
break;
- case 1:
+ case HCLGE_FW_MAC_SPEED_10G:
*speed = HCLGE_MAC_SPEED_10G;
break;
- case 2:
+ case HCLGE_FW_MAC_SPEED_25G:
*speed = HCLGE_MAC_SPEED_25G;
break;
- case 3:
+ case HCLGE_FW_MAC_SPEED_40G:
*speed = HCLGE_MAC_SPEED_40G;
break;
- case 4:
+ case HCLGE_FW_MAC_SPEED_50G:
*speed = HCLGE_MAC_SPEED_50G;
break;
- case 5:
+ case HCLGE_FW_MAC_SPEED_100G:
*speed = HCLGE_MAC_SPEED_100G;
break;
+ case HCLGE_FW_MAC_SPEED_200G:
+ *speed = HCLGE_MAC_SPEED_200G;
+ break;
default:
return -EINVAL;
}
@@ -967,41 +972,43 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
return 0;
}
+static const struct hclge_speed_bit_map speed_bit_map[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
+ {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
+ {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
+ {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
+ {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
+ {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
+ {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
+ {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
+ {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+};
+
+static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
+ if (speed == speed_bit_map[i].speed) {
+ *speed_bit = speed_bit_map[i].speed_bit;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 speed_ability = hdev->hw.mac.speed_ability;
u32 speed_bit = 0;
+ int ret;
- switch (speed) {
- case HCLGE_MAC_SPEED_10M:
- speed_bit = HCLGE_SUPPORT_10M_BIT;
- break;
- case HCLGE_MAC_SPEED_100M:
- speed_bit = HCLGE_SUPPORT_100M_BIT;
- break;
- case HCLGE_MAC_SPEED_1G:
- speed_bit = HCLGE_SUPPORT_1G_BIT;
- break;
- case HCLGE_MAC_SPEED_10G:
- speed_bit = HCLGE_SUPPORT_10G_BIT;
- break;
- case HCLGE_MAC_SPEED_25G:
- speed_bit = HCLGE_SUPPORT_25G_BIT;
- break;
- case HCLGE_MAC_SPEED_40G:
- speed_bit = HCLGE_SUPPORT_40G_BIT;
- break;
- case HCLGE_MAC_SPEED_50G:
- speed_bit = HCLGE_SUPPORT_50G_BIT;
- break;
- case HCLGE_MAC_SPEED_100G:
- speed_bit = HCLGE_SUPPORT_100G_BIT;
- break;
- default:
- return -EINVAL;
- }
+ ret = hclge_get_speed_bit(speed, &speed_bit);
+ if (ret)
+ return ret;
if (speed_bit & speed_ability)
return 0;
@@ -1009,118 +1016,159 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
+static void hclge_convert_setting_sr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ link_mode);
}
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_lr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- mac->supported);
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ link_mode);
}
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_cr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ link_mode);
}
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_kr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ link_mode);
}
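
After this refactor the convert helpers no longer take the MAC object, so the same code can fill either the supported or the advertising bitmap; both usages appear later in this patch, for example:

	/* fill the supported modes during initialisation ... */
	hclge_convert_setting_kr(speed_ability, mac->supported);

	/* ... or the advertised modes when the port capability changes */
	hclge_convert_setting_kr(speed_ability, mac->advertising);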
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+	/* if firmware has already reported fec_ability, there is no need to
+	 * derive it from the speed
+	 */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
+ case HCLGE_MAC_SPEED_200G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -1128,32 +1176,37 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
mac->supported);
- hclge_convert_setting_sr(mac, speed_ability);
- hclge_convert_setting_lr(mac, speed_ability);
- hclge_convert_setting_cr(mac, speed_ability);
- if (hdev->pdev->revision >= 0x21)
+ hclge_convert_setting_sr(speed_ability, mac->supported);
+ hclge_convert_setting_lr(speed_ability, mac->supported);
+ hclge_convert_setting_cr(speed_ability, mac->supported);
+ if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
+ if (hnae3_dev_pause_supported(hdev))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
- hclge_convert_setting_kr(mac, speed_ability);
- if (hdev->pdev->revision >= 0x21)
+ hclge_convert_setting_kr(speed_ability, mac->supported);
+ if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
+
+ if (hnae3_dev_pause_supported(hdev))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
unsigned long *supported = hdev->hw.mac.supported;
@@ -1177,13 +1230,16 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
}
+ if (hnae3_dev_pause_supported(hdev)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ }
+
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
-static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
u8 media_type = hdev->hw.mac.media_type;
@@ -1195,8 +1251,11 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
-static u32 hclge_get_max_speed(u8 speed_ability)
+static u32 hclge_get_max_speed(u16 speed_ability)
{
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ return HCLGE_MAC_SPEED_200G;
+
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
return HCLGE_MAC_SPEED_100G;
@@ -1226,17 +1285,18 @@ static u32 hclge_get_max_speed(u8 speed_ability)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
+#define HCLGE_TX_SPARE_SIZE_UNIT 4096
+#define SPEED_ABILITY_EXT_SHIFT 8
+
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
+ u16 speed_ability_ext;
u64 mac_addr_tmp;
unsigned int i;
req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
- cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_VMDQ_M,
- HCLGE_CFG_VMDQ_S);
cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
@@ -1263,9 +1323,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
- cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_RSS_SIZE_M,
- HCLGE_CFG_RSS_SIZE_S);
+ cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1276,11 +1336,42 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_EXT_M,
+ HCLGE_CFG_SPEED_ABILITY_EXT_S);
+ cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+
+ cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_VLAN_FLTR_CAP_M,
+ HCLGE_CFG_VLAN_FLTR_CAP_S);
+
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
- if (!cfg->umv_space)
- cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+
+ cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
+ HCLGE_CFG_PF_RSS_SIZE_M,
+ HCLGE_CFG_PF_RSS_SIZE_S);
+
+	/* The HCLGE_CFG_PF_RSS_SIZE_M field holds the exponent of the PF's
+	 * max rss size (a power of 2) instead of the size itself, which
+	 * is more flexible for future changes and expansions.
+	 * The VF max rss size is taken from HCLGE_CFG_RSS_SIZE_S, and a PF
+	 * field of 0 makes no sense on its own; in that case the PF uses
+	 * the same max rss size field as the VF: HCLGE_CFG_RSS_SIZE_S.
+	 */
+ cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
+ 1U << cfg->pf_rss_size_max :
+ cfg->vf_rss_size_max;
+
+ /* The unit of the tx spare buffer size queried from configuration
+ * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
+ * needed here.
+ */
+ cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
+ cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
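
A short worked example of the two conversions above (values chosen purely for illustration):

	/* illustrative decode, following the conversions above:
	 *   pf rss size field = 7  ->  cfg->pf_rss_size_max = 1U << 7 = 128
	 *   pf rss size field = 0  ->  cfg->pf_rss_size_max = cfg->vf_rss_size_max
	 *   tx spare buf field = 2 ->  cfg->tx_spare_buf_size = 2 * 4096 = 8192 bytes
	 */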
/* hclge_get_cfg: query the static parameter from flash
@@ -1319,6 +1410,115 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
return 0;
}
+static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
+ ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+ struct hclge_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_dev_specs_0_cmd *req0;
+ struct hclge_dev_specs_1_cmd *req1;
+
+ req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+ req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
+ ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
+}
+
+static void hclge_check_dev_specs(struct hclge_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
+ if (!dev_specs->max_tm_rate)
+ dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_qset_num)
+ dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
+ if (!dev_specs->max_int_gl)
+ dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+ if (!dev_specs->umv_size)
+ dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
+{
+ u32 reg_num = 0;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &reg_num);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
+ return 0;
+}
+
+static int hclge_query_dev_specs(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ ret = hclge_query_mac_stats_num(hdev);
+ if (ret)
+ return ret;
+
+ /* set default specifications as devices lower than version V3 do not
+ * support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclge_set_default_dev_specs(hdev);
+ return 0;
+ }
+
+ for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_dev_specs(hdev, desc);
+ hclge_check_dev_specs(hdev);
+
+ return 0;
+}
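
The hclge_dev_specs_0_cmd / hclge_dev_specs_1_cmd layouts are defined in hclge_cmd.h and are not part of this hunk; going only by the fields read in hclge_parse_dev_specs() above, they carry at least the following (field order and reserved bytes are not known here, so treat this as a sketch):

struct hclge_dev_specs_0_cmd {
	/* ... */
	u8	max_non_tso_bd_num;
	__le16	rss_ind_tbl_size;
	__le16	rss_key_size;
	__le16	int_ql_max;
	__le32	max_tm_rate;
	/* ... */
};

struct hclge_dev_specs_1_cmd {
	/* ... */
	__le16	max_frm_size;
	__le16	max_qset_num;
	__le16	max_int_gl;
	__le16	umv_size;
	__le16	mc_mac_size;
	/* ... */
};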
+
static int hclge_get_cap(struct hclge_dev *hdev)
{
int ret;
@@ -1346,26 +1546,52 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
"Running kdump kernel. Using minimal resources\n");
/* minimal queue pairs equals to the number of vports */
- hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ hdev->num_tqps = hdev->num_req_vfs + 1;
hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+ unsigned int i;
+
+ if (hdev->tc_max > HNAE3_MAX_TC ||
+ hdev->tc_max < 1) {
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+	/* currently non-contiguous TCs are not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_cfg cfg;
- unsigned int i;
int ret;
ret = hclge_get_cfg(hdev, &cfg);
- if (ret) {
- dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
+ if (ret)
return ret;
- }
- hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
- hdev->rss_size_max = cfg.rss_size_max;
+ hdev->vf_rss_size_max = cfg.vf_rss_size_max;
+ hdev->pf_rss_size_max = cfg.pf_rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
@@ -1375,16 +1601,24 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
- hdev->wanted_umv_size = cfg.umv_space;
+ if (cfg.umv_space)
+ hdev->wanted_umv_size = cfg.umv_space;
+ else
+ hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
+ hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
+ hdev->gro_en = true;
+ if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
- dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
+ cfg.default_speed, ret);
return ret;
}
@@ -1392,77 +1626,40 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
- if ((hdev->tc_max > HNAE3_MAX_TC) ||
- (hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
- hdev->tc_max);
- hdev->tc_max = 1;
- }
-
- /* Dev does not support DCB */
- if (!hnae3_dev_dcb_supported(hdev)) {
- hdev->tc_max = 1;
- hdev->pfc_max = 0;
- } else {
- hdev->pfc_max = hdev->tc_max;
- }
-
- hdev->tm_info.num_tc = 1;
-
- /* Currently not support uncontiuous tc */
- for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae3_set_bit(hdev->hw_tc_map, i, 1);
-
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-
+ hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
- /* Set the init affinity based on pci func number */
- i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
- i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
- cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
- &hdev->affinity_mask);
-
return ret;
}
-static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
- unsigned int tso_mss_max)
+static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
+ u16 tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
- u16 tso_mss;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
-
- tso_mss = 0;
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
- req->tso_mss_min = cpu_to_le16(tso_mss);
-
- tso_mss = 0;
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
- req->tso_mss_max = cpu_to_le16(tso_mss);
+ req->tso_mss_min = cpu_to_le16(tso_mss_min);
+ req->tso_mss_max = cpu_to_le16(tso_mss_max);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+static int hclge_config_gro(struct hclge_dev *hdev)
{
struct hclge_cfg_gro_status_cmd *req;
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_gro_status_cmd *)desc.data;
- req->gro_en = cpu_to_le16(en ? 1 : 0);
+ req->gro_en = hdev->gro_en ? 1 : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -1474,11 +1671,12 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
- struct hclge_tqp *tqp;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclge_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -1492,8 +1690,28 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
tqp->q.buf_size = hdev->rx_buf_len;
tqp->q.tx_desc_num = hdev->num_tx_desc;
tqp->q.rx_desc_num = hdev->num_rx_desc;
- tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
- i * HCLGE_TQP_REG_SIZE;
+
+ /* need an extended offset to configure queues >=
+ * HCLGE_TQP_MAX_SIZE_DEV_V2
+ */
+ if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
+ tqp->q.io_base = hdev->hw.hw.io_base +
+ HCLGE_TQP_REG_OFFSET +
+ i * HCLGE_TQP_REG_SIZE;
+ else
+ tqp->q.io_base = hdev->hw.hw.io_base +
+ HCLGE_TQP_REG_OFFSET +
+ HCLGE_TQP_EXT_REG_OFFSET +
+ (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
+ HCLGE_TQP_REG_SIZE;
+
+ /* when device supports tx push and has device memory,
+ * the queue can execute push mode or doorbell mode on
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGE_TQP_MEM_OFFSET(hdev, i);
tqp++;
}
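
The doorbell address selection above could equally be factored into a small helper; a minimal sketch (not the driver's actual code) that makes the two register windows explicit:

/* illustrative only: queues below HCLGE_TQP_MAX_SIZE_DEV_V2 live in the
 * original register window, later queues in the extended window that
 * starts HCLGE_TQP_EXT_REG_OFFSET bytes further in
 */
static void __iomem *hclge_tqp_io_base(struct hclge_dev *hdev, int i)
{
	if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
		return hdev->hw.hw.io_base + HCLGE_TQP_REG_OFFSET +
		       i * HCLGE_TQP_REG_SIZE;

	return hdev->hw.hw.io_base + HCLGE_TQP_REG_OFFSET +
	       HCLGE_TQP_EXT_REG_OFFSET +
	       (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * HCLGE_TQP_REG_SIZE;
}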
@@ -1544,7 +1762,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
}
}
vport->alloc_tqps = alloced;
- kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+ kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
/* ensure one to one mapping between irq and queue at default */
@@ -1567,6 +1785,7 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
kinfo->num_rx_desc = num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
+ kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
@@ -1589,8 +1808,8 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
kinfo = &nic->kinfo;
for (i = 0; i < vport->alloc_tqps; i++) {
- struct hclge_tqp *q =
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ struct hclge_comm_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
bool is_pf;
int ret;
@@ -1609,8 +1828,8 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
struct hclge_vport *vport = hdev->vport;
u16 i, num_vport;
- num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
- for (i = 0; i < num_vport; i++) {
+ num_vport = hdev->num_req_vfs + 1;
+ for (i = 0; i < num_vport; i++) {
int ret;
ret = hclge_map_tqp_to_vport(hdev, vport);
@@ -1632,6 +1851,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
@@ -1651,7 +1871,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
int ret;
/* We need to alloc a vport for main NIC of PF */
- num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ num_vport = hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) {
dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
@@ -1680,10 +1900,13 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->port_base_vlan_cfg.tbl_sta = true;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
+ vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
+ spin_lock_init(&vport->mac_list_lock);
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -2026,7 +2249,6 @@ static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
COMPENSATE_HALF_MPS_NUM * half_mps;
min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
-
if (rx_priv < min_rx_priv)
return false;
@@ -2055,7 +2277,7 @@ static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hdev: pointer to struct hclge_dev
* @buf_alloc: pointer to buffer calculation data
- * @return: 0: calculate sucessful, negative: fail
+ * @return: 0: calculate successful, negative: fail
*/
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
@@ -2139,9 +2361,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
@@ -2184,9 +2406,9 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
@@ -2305,17 +2527,18 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
struct hnae3_handle *roce = &vport->roce;
struct hnae3_handle *nic = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
roce->rinfo.num_vectors = vport->back->num_roce_msi;
- if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
- vport->back->num_msi_left == 0)
+ if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
return -EINVAL;
- roce->rinfo.base_vector = vport->back->roce_base_vector;
+ roce->rinfo.base_vector = hdev->num_nic_msi;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = vport->back->hw.io_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2347,10 +2570,6 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
- hdev->base_msi_vector = pdev->irq;
- hdev->roce_base_vector = hdev->base_msi_vector +
- hdev->roce_base_msix_offset;
-
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
if (!hdev->vector_status) {
@@ -2379,11 +2598,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed)
return duplex;
}
+static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
+ {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
+ {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
+ {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
+ {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
+ {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
+ {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
+ {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
+ {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
+};
+
+static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
+ if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
+ *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
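
As with the speed bit map earlier, struct hclge_mac_speed_map is declared in the header; based on the initialisers and the lookup above it presumably looks roughly like:

struct hclge_mac_speed_map {
	u32 speed_drv;	/* speed as used by the driver (HCLGE_MAC_SPEED_*) */
	u32 speed_fw;	/* speed encoding expected by firmware (HCLGE_FW_MAC_SPEED_*) */
};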
+
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
+ u32 speed_fw;
int ret;
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
@@ -2393,46 +2639,17 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
- switch (speed) {
- case HCLGE_MAC_SPEED_10M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 6);
- break;
- case HCLGE_MAC_SPEED_100M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 7);
- break;
- case HCLGE_MAC_SPEED_1G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 0);
- break;
- case HCLGE_MAC_SPEED_10G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 1);
- break;
- case HCLGE_MAC_SPEED_25G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 2);
- break;
- case HCLGE_MAC_SPEED_40G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 3);
- break;
- case HCLGE_MAC_SPEED_50G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 4);
- break;
- case HCLGE_MAC_SPEED_100G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 5);
- break;
- default:
+ ret = hclge_convert_to_fw_speed(speed, &speed_fw);
+ if (ret) {
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
- return -EINVAL;
+ return ret;
}
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
+ speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2444,33 +2661,35 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (!lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
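
A lane_num of 0 acts as a "don't care" value in the new signature: the duplicate-config check above treats it as a match, and callers that have no lane information (such as the SFP speed path later in this patch) simply pass 0:

	/* configure speed/duplex without touching the lane number */
	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);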
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2550,6 +2769,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2564,6 +2934,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2616,7 +2989,7 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
@@ -2655,33 +3028,40 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_mbx_scheduled = jiffies;
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_rst_scheduled = jiffies;
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
+}
+
+static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task,
- delay_time);
+ mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
-static int hclge_get_mac_link_status(struct hclge_dev *hdev)
+static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
struct hclge_link_status_cmd *req;
struct hclge_desc desc;
- int link_status;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
@@ -2693,43 +3073,57 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
}
req = (struct hclge_link_status_cmd *)desc.data;
- link_status = req->status & HCLGE_LINK_STATUS_UP_M;
+ *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
- return !!link_status;
+ return 0;
}
-static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
+static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
- unsigned int mac_state;
- int link_stat;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ *link_status = HCLGE_LINK_STATUS_DOWN;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
return 0;
- mac_state = hclge_get_mac_link_status(hdev);
+ if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
+ return 0;
- if (hdev->hw.mac.phydev) {
- if (hdev->hw.mac.phydev->state == PHY_RUNNING)
- link_stat = mac_state &
- hdev->hw.mac.phydev->link;
- else
- link_stat = 0;
+ return hclge_get_mac_link_status(hdev, link_status);
+}
- } else {
- link_stat = mac_state;
- }
+static void hclge_push_link_status(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
+ vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
+ continue;
- return !!link_stat;
+ ret = hclge_push_vf_link_status(vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to push link status to vf%u, ret = %d\n",
+ i, ret);
+ }
+ }
}
static void hclge_update_link_status(struct hclge_dev *hdev)
{
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
- struct hnae3_handle *rhandle;
- struct hnae3_handle *handle;
int state;
- int i;
+ int ret;
if (!client)
return;
@@ -2737,27 +3131,109 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
return;
- state = hclge_get_mac_phy_link(hdev);
+ ret = hclge_get_mac_phy_link(hdev, &state);
+ if (ret) {
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+ return;
+ }
+
if (state != hdev->hw.mac.link) {
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- handle = &hdev->vport[i].nic;
- client->ops->link_status_change(handle, state);
- hclge_config_mac_tnl_int(hdev, state);
- rhandle = &hdev->vport[i].roce;
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle,
- state);
- }
hdev->hw.mac.link = state;
+ client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle, state);
+
+ hclge_push_link_status(hdev);
}
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
-static void hclge_update_port_capability(struct hclge_mac *mac)
+static void hclge_update_speed_advertising(struct hclge_mac *mac)
{
- /* update fec ability by speed */
- hclge_convert_setting_fec(mac);
+ u32 speed_ability;
+
+ if (hclge_get_speed_bit(mac->speed, &speed_ability))
+ return;
+
+ switch (mac->module_type) {
+ case HNAE3_MODULE_TYPE_FIBRE_LR:
+ hclge_convert_setting_lr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_FIBRE_SR:
+ case HNAE3_MODULE_TYPE_AOC:
+ hclge_convert_setting_sr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_CR:
+ hclge_convert_setting_cr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_KR:
+ hclge_convert_setting_kr(speed_ability, mac->advertising);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_update_fec_advertising(struct hclge_mac *mac)
+{
+ if (mac->fec_mode & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->advertising);
+ else
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->advertising);
+}
+
+static void hclge_update_pause_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ bool rx_en, tx_en;
+
+ switch (hdev->fc_mode_last_time) {
+ case HCLGE_FC_RX_PAUSE:
+ rx_en = true;
+ tx_en = false;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ rx_en = false;
+ tx_en = true;
+ break;
+ case HCLGE_FC_FULL:
+ rx_en = true;
+ tx_en = true;
+ break;
+ default:
+ rx_en = false;
+ tx_en = false;
+ break;
+ }
+
+ linkmode_set_pause(mac->advertising, tx_en, rx_en);
+}
+
+static void hclge_update_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ linkmode_zero(mac->advertising);
+ hclge_update_speed_advertising(mac);
+ hclge_update_fec_advertising(mac);
+ hclge_update_pause_advertising(hdev);
+}
+
+static void hclge_update_port_capability(struct hclge_dev *hdev,
+ struct hclge_mac *mac)
+{
+ if (hnae3_dev_fec_supported(hdev))
+ hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
* read from configuration can help deal it
@@ -2774,7 +3250,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
} else {
linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
mac->supported);
- linkmode_zero(mac->advertising);
+ hclge_update_advertising(hdev);
}
}
@@ -2838,10 +3314,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -2849,24 +3327,162 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
return 0;
}
+static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_phy_link_ksetting_0_cmd *req0;
+ struct hclge_phy_link_ksetting_1_cmd *req1;
+ u32 supported, advertising, lp_advertising;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+ true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get phy link ksetting, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+ cmd->base.autoneg = req0->autoneg;
+ cmd->base.speed = le32_to_cpu(req0->speed);
+ cmd->base.duplex = req0->duplex;
+ cmd->base.port = req0->port;
+ cmd->base.transceiver = req0->transceiver;
+ cmd->base.phy_address = req0->phy_address;
+ cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
+ cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
+ supported = le32_to_cpu(req0->supported);
+ advertising = le32_to_cpu(req0->advertising);
+ lp_advertising = le32_to_cpu(req0->lp_advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
+
+ req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+ cmd->base.master_slave_cfg = req1->master_slave_cfg;
+ cmd->base.master_slave_state = req1->master_slave_state;
+
+ return 0;
+}
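
The hclge_phy_link_ksetting_0_cmd / _1_cmd structures live in hclge_cmd.h; going only by the fields copied above and in the set path below, descriptor 0 carries at least the following (a sketch, with ordering and reserved bytes unknown):

struct hclge_phy_link_ksetting_0_cmd {
	__le32	speed;
	u8	duplex;
	u8	autoneg;
	u8	port;
	u8	transceiver;
	u8	phy_address;
	u8	eth_tp_mdix;
	u8	eth_tp_mdix_ctrl;
	/* ... */
	__le32	supported;
	__le32	advertising;
	__le32	lp_advertising;
};

struct hclge_phy_link_ksetting_1_cmd {
	u8	master_slave_cfg;
	u8	master_slave_state;
	/* ... */
};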
+
+static int
+hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_phy_link_ksetting_0_cmd *req0;
+ struct hclge_phy_link_ksetting_1_cmd *req1;
+ struct hclge_dev *hdev = vport->back;
+ u32 advertising;
+ int ret;
+
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+ false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+ false);
+
+ req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+ req0->autoneg = cmd->base.autoneg;
+ req0->speed = cpu_to_le32(cmd->base.speed);
+ req0->duplex = cmd->base.duplex;
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ req0->advertising = cpu_to_le32(advertising);
+ req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+ req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+ req1->master_slave_cfg = cmd->base.master_slave_cfg;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set phy link ksettings, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hdev->hw.mac.autoneg = cmd->base.autoneg;
+ hdev->hw.mac.speed = cmd->base.speed;
+ hdev->hw.mac.duplex = cmd->base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
+
+ return 0;
+}
+
+static int hclge_update_tp_port_info(struct hclge_dev *hdev)
+{
+ struct ethtool_link_ksettings cmd;
+ int ret;
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return 0;
+
+ ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.autoneg = cmd.base.autoneg;
+ hdev->hw.mac.speed = cmd.base.speed;
+ hdev->hw.mac.duplex = cmd.base.duplex;
+
+ return 0;
+}
+
+static int hclge_tp_port_init(struct hclge_dev *hdev)
+{
+ struct ethtool_link_ksettings cmd;
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return 0;
+
+ cmd.base.autoneg = hdev->hw.mac.autoneg;
+ cmd.base.speed = hdev->hw.mac.speed;
+ cmd.base.duplex = hdev->hw.mac.duplex;
+ linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
+
+ return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
+}
+
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int speed;
int ret;
/* get the port info from SFP cmd if not copper port */
if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
- return 0;
+ return hclge_update_tp_port_info(hdev);
/* if IMP does not support get SFP/qSFP info, return directly */
if (!hdev->support_sfp_query)
return 0;
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ speed = mac->speed;
ret = hclge_get_sfp_info(hdev, mac);
- else
+ } else {
+ speed = HCLGE_MAC_SPEED_UNKNOWN;
ret = hclge_get_sfp_speed(hdev, &speed);
+ }
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
@@ -2875,19 +3491,21 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return ret;
}
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
- hclge_update_port_capability(mac);
+ hclge_update_port_capability(hdev, mac);
+ if (mac->speed != speed)
+ (void)hclge_tm_port_shaper_cfg(hdev);
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -2950,25 +3568,41 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int link_state_old;
+ int ret;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ link_state_old = vport->vf_info.link_state;
vport->vf_info.link_state = link_state;
- return 0;
+	/* return success directly if the VF is not alive; the VF will
+	 * query the link state itself when it starts working.
+	 */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ return 0;
+
+ ret = hclge_push_vf_link_status(vport);
+ if (ret) {
+ vport->vf_info.link_state = link_state_old;
+ dev_err(&hdev->pdev->dev,
+ "failed to push vf%d link status, ret = %d\n", vf, ret);
+ }
+
+ return ret;
}
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
+ u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
- msix_src_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ hw_err_src_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -2978,28 +3612,33 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*
* check for vector0 reset event sources
*/
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
- /* check for vector0 msix event source */
- if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- *clearval = msix_src_reg;
+ /* check for vector0 msix event and hardware error event source */
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
+ hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
return HCLGE_VECTOR0_EVENT_ERR;
+
+ /* check for vector0 ptp event source */
+ if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
+ *clearval = msix_src_reg;
+ return HCLGE_VECTOR0_EVENT_PTP;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
@@ -3011,9 +3650,8 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* print other vector0 event source */
dev_info(&hdev->pdev->dev,
- "CMDQ INT status:0x%x, other INT status:0x%x\n",
- cmdq_src_reg, msix_src_reg);
- *clearval = msix_src_reg;
+ "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
+ cmdq_src_reg, hw_err_src_reg, msix_src_reg);
return HCLGE_VECTOR0_EVENT_OTHER;
}
@@ -3022,6 +3660,7 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
@@ -3050,30 +3689,26 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ unsigned long flags;
u32 clearval = 0;
u32 event_cause;
hclge_enable_vector(&hdev->misc_vector, false);
event_cause = hclge_check_event_cause(hdev, &clearval);
- /* vector 0 interrupt is shared with reset and mailbox source events.*/
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
switch (event_cause) {
case HCLGE_VECTOR0_EVENT_ERR:
- /* we do not know what type of reset is required now. This could
- * only be decided after we fetch the type of errors which
- * caused this event. Therefore, we will do below for now:
- * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
- * have defered type of reset to be used.
- * 2. Schedule the reset serivce task.
- * 3. When service task receives HNAE3_UNKNOWN_RESET type it
- * will fetch the correct type of reset. This would be done
- * by first decoding the types of errors.
- */
- set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
- /* fall through */
+ hclge_errhand_task_schedule(hdev);
+ break;
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
+ case HCLGE_VECTOR0_EVENT_PTP:
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ hclge_ptp_clean_tx_hwts(hdev);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+ break;
case HCLGE_VECTOR0_EVENT_MBX:
/* If we are here then,
* 1. Either we are not handling any mbx task and we are not
@@ -3094,15 +3729,11 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
hclge_clear_event_cause(hdev, event_cause, clearval);
- /* Enable interrupt if it is not cause by reset. And when
- * clearval equal to 0, it means interrupt status may be
- * cleared by hardware before driver reads status register.
- * For this case, vector0 interrupt also should be enabled.
- */
- if (!clearval ||
- event_cause == HCLGE_VECTOR0_EVENT_MBX) {
+ /* Enable interrupt if it is not caused by reset event or error event */
+ if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
+ event_cause == HCLGE_VECTOR0_EVENT_MBX ||
+ event_cause == HCLGE_VECTOR0_EVENT_OTHER)
hclge_enable_vector(&hdev->misc_vector, true);
- }
return IRQ_HANDLED;
}
@@ -3126,43 +3757,13 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
- vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
hdev->vector_status[0] = 0;
hdev->num_msi_left -= 1;
hdev->num_msi_used += 1;
}
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
- affinity_notify);
-
- cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
-static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
-{
- irq_set_affinity_hint(hdev->misc_vector.vector_irq,
- &hdev->affinity_mask);
-
- hdev->affinity_notify.notify = hclge_irq_affinity_notify;
- hdev->affinity_notify.release = hclge_irq_affinity_release;
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
- &hdev->affinity_notify);
-}
-
-static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
-{
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
- irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
-}
-
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -3192,8 +3793,9 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hnae3_client *client = hdev->nic_client;
- u16 i;
+ int ret;
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
return 0;
@@ -3201,27 +3803,20 @@ int hclge_notify_client(struct hclge_dev *hdev,
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- struct hnae3_handle *handle = &hdev->vport[i].nic;
- int ret;
-
- ret = client->ops->reset_notify(handle, type);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "notify nic client failed %d(%d)\n", type, ret);
- return ret;
- }
- }
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
- return 0;
+ return ret;
}
static int hclge_notify_roce_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
+ struct hnae3_handle *handle = &hdev->vport[0].roce;
struct hnae3_client *client = hdev->roce_client;
- int ret = 0;
- u16 i;
+ int ret;
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
return 0;
@@ -3229,17 +3824,10 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- struct hnae3_handle *handle = &hdev->vport[i].roce;
-
- ret = client->ops->reset_notify(handle, type);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "notify roce client failed %d(%d)",
- type, ret);
- return ret;
- }
- }
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
+ type, ret);
return ret;
}
@@ -3307,7 +3895,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
int i;
- for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
int ret;
@@ -3316,7 +3904,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret) {
dev_err(&hdev->pdev->dev,
"set vf(%u) rst failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
return ret;
}
@@ -3331,7 +3920,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret)
dev_warn(&hdev->pdev->dev,
"inform reset to vf(%u) failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
}
return 0;
@@ -3340,10 +3930,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
- test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "mbx service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+ smp_processor_id());
+
hclge_mbx_handler(hdev);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
@@ -3378,7 +3975,7 @@ static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
- hclge_cmd_reuse_desc(&desc, true);
+ hclge_comm_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
@@ -3388,14 +3985,12 @@ void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type)
{
struct hnae3_client *client = hdev->nic_client;
- u16 i;
if (!client || !client->ops->process_hw_error ||
!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
return;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
- client->ops->process_hw_error(&hdev->vport[i].nic, type);
+ client->ops->process_hw_error(&hdev->vport[0].nic, type);
}
static void hclge_handle_imp_error(struct hclge_dev *hdev)
@@ -3441,7 +4036,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
u32 val;
if (hclge_get_hw_reset_stat(handle)) {
- dev_info(&pdev->dev, "Hardware reset not finish\n");
+ dev_info(&pdev->dev, "hardware reset not finish\n");
dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
@@ -3449,21 +4044,27 @@ static void hclge_do_reset(struct hclge_dev *hdev)
}
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ dev_info(&pdev->dev, "IMP reset requested\n");
+ val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
+ break;
case HNAE3_GLOBAL_RESET:
+ dev_info(&pdev->dev, "global reset requested\n");
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
- dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_FUNC_RESET:
- dev_info(&pdev->dev, "PF Reset requested\n");
+ dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", hdev->reset_type);
+ "unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
@@ -3474,28 +4075,6 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
- /* first, resolve any unknown reset type to the known type(s) */
- if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
- u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
- /* we will intentionally ignore any errors from this function
- * as we will end up in *some* reset request in any case
- */
- if (hclge_handle_hw_msix_error(hdev, addr))
- dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
- msix_sts_reg);
-
- clear_bit(HNAE3_UNKNOWN_RESET, addr);
- /* We defered the clearing of the error event which caused
- * interrupt since it was not posssible to do that in
- * interrupt context (and this is the reason we introduced
- * new UNKNOWN reset type). Now, the errors have been
- * handled and cleared in hardware we can safely enable
- * interrupts. This is an exception to the norm.
- */
- hclge_enable_vector(&hdev->misc_vector, true);
- }
-
/* return the highest priority reset level amongst all */
if (test_bit(HNAE3_IMP_RESET, addr)) {
rst_level = HNAE3_IMP_RESET;
@@ -3542,7 +4121,7 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
/* For revision 0x20, the reset interrupt source
* can only be cleared after hardware reset done
*/
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
clearval);
@@ -3553,13 +4132,13 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
- reg_val |= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
else
- reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+ reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
@@ -3596,9 +4175,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
 /* After performing pf reset, it is not necessary to do the
* mailbox handling or send any command to firmware, because
* any mailbox handling or command to firmware is only valid
- * after hclge_cmd_init is called.
+ * after hclge_comm_cmd_init is called.
*/
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
@@ -3624,6 +4203,21 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
return ret;
}
+static void hclge_show_rst_info(struct hclge_dev *hdev)
+{
+ char *buf;
+
+ buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
+
+ dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
+
+ kfree(buf);
+}
+
static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3654,13 +4248,34 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
dev_err(&hdev->pdev->dev, "Reset fail!\n");
- hclge_dbg_dump_rst_info(hdev);
+ hclge_show_rst_info(hdev);
set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
return false;
}
+static void hclge_update_reset_level(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+
+ /* reset request will not be set during reset, so clear
+ * pending reset request to avoid unnecessary reset
+ * caused by the same reason.
+ */
+ hclge_get_reset_level(ae_dev, &hdev->reset_request);
+
+ /* if default_reset_request has a higher level reset request,
+ * it should be handled as soon as possible, since some errors
+ * need this kind of reset to be fixed.
+ */
+ reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (reset_level != HNAE3_NONE_RESET)
+ set_bit(reset_level, &hdev->reset_request);
+}
+
static int hclge_set_rst_done(struct hclge_dev *hdev)
{
struct hclge_pf_rst_done_cmd *req;
@@ -3695,12 +4310,10 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* fall through */
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
case HNAE3_GLOBAL_RESET:
- /* fall through */
case HNAE3_IMP_RESET:
ret = hclge_set_rst_done(hdev);
break;
@@ -3726,22 +4339,13 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}
static int hclge_reset_prepare(struct hclge_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
- /* Initialize ae_dev reset status as well, in case enet layer wants to
- * know if device is undergoing reset
- */
- ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.reset_cnt++;
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
@@ -3759,8 +4363,6 @@ static int hclge_reset_prepare(struct hclge_dev *hdev)
static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- enum hnae3_reset_type reset_level;
int ret;
hdev->rst_stats.hw_reset_done_cnt++;
@@ -3777,11 +4379,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hclge_clear_reset_cause(hdev);
- ret = hclge_reset_prepare_up(hdev);
- if (ret)
- return ret;
-
-
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
* times
@@ -3790,6 +4387,10 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
return ret;
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ return ret;
+
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
@@ -3803,17 +4404,9 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->last_reset_time = jiffies;
hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
- ae_dev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
- /* if default_reset_request has a higher level reset request,
- * it should be handled as soon as possible. since some errors
- * need this kind of reset to fix.
- */
- reset_level = hclge_get_reset_level(ae_dev,
- &hdev->default_reset_request);
- if (reset_level != HNAE3_NONE_RESET)
- set_bit(reset_level, &hdev->reset_request);
+ hclge_update_reset_level(hdev);
return 0;
}
@@ -3846,7 +4439,6 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
* normalcy is to reset.
* 2. A new reset request from the stack due to timeout
*
- * For the first case,error event might not have ae handle available.
* check if this is a new reset request and we are not here just because
* last reset attempt did not succeed and watchdog hit us again. We will
* know this if last reset request did not occur very recently (watchdog
@@ -3856,14 +4448,14 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
* want to make sure we throttle the reset request. Therefore, we will
* not allow it again before 3*HZ times.
*/
- if (!handle)
- handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
HCLGE_RESET_INTERVAL))) {
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- } else if (hdev->default_reset_request) {
+ }
+
+ if (hdev->default_reset_request) {
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
@@ -3931,11 +4523,80 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
hdev->reset_type = HNAE3_NONE_RESET;
}
+static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_type;
+
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_type);
+ }
+
+ if (hdev->default_reset_request && ae_dev->ops->reset_event)
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+
+ /* enable interrupt after error handling complete */
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
+static void hclge_handle_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->hw_err_reset_req = 0;
+
+ if (hclge_find_error_source(hdev)) {
+ hclge_handle_error_info_log(ae_dev);
+ hclge_handle_mac_tnl(hdev);
+ }
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_misc_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct device *dev = &hdev->pdev->dev;
+ u32 msix_sts_reg;
+
+ msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+ if (hclge_handle_hw_msix_error
+ (hdev, &hdev->default_reset_request))
+ dev_info(dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+ }
+
+ hclge_handle_hw_ras_error(ae_dev);
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_errhand_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ return;
+
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_err_recovery(hdev);
+ else
+ hclge_misc_err_recovery(hdev);
+}
+
static void hclge_reset_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_rst_scheduled +
+ HCLGE_RESET_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "reset service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+ smp_processor_id());
+
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
@@ -3966,10 +4627,16 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
unsigned long delta = round_jiffies_relative(HZ);
+ if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ return;
+
/* Always handle the link updating to make sure link state is
* updated when it is triggered by mbx.
*/
hclge_update_link_status(hdev);
+ hclge_sync_mac_table(hdev);
+ hclge_sync_promisc_mode(hdev);
+ hclge_sync_fd_table(hdev);
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed;
@@ -4003,19 +4670,43 @@ out:
hclge_task_schedule(hdev, delta);
}
+static void hclge_ptp_service_task(struct hclge_dev *hdev)
+{
+ unsigned long flags;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
+ !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
+ !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
+ return;
+
+ /* to prevent racing with the irq handler */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+
+ /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
+ * handler may handle it just before spin_lock_irqsave().
+ */
+ if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
+ hclge_ptp_clean_tx_hwts(hdev);
+
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+}
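The flag re-check under hdev->ptp->lock above is the usual "test, lock, test again" pattern for work that is shared with an interrupt handler. Below is a minimal userspace sketch of the same pattern; the names and the pthread mutex are illustrative only, in the driver the flag is the HCLGE_STATE_PTP_TX_HANDLING bit and the lock is hdev->ptp->lock (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool tx_pending;

static void service_task(void)
{
	if (!tx_pending)        /* cheap unlocked check */
		return;

	pthread_mutex_lock(&lock);
	/* re-check: the irq handler may have consumed the work in between;
	 * the kernel code uses atomic bitops (test_bit/clear_bit) for this
	 */
	if (tx_pending) {
		printf("clean tx hwts\n");
		tx_pending = false;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	tx_pending = true;
	service_task();         /* does the work exactly once */
	service_task();         /* nothing left to do */
	return 0;
}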
+
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task.work);
+ hclge_errhand_service_task(hdev);
hclge_reset_service_task(hdev);
+ hclge_ptp_service_task(hdev);
hclge_mailbox_service_task(hdev);
hclge_periodic_service_task(hdev);
- /* Handle reset and mbx again in case periodical task delays the
- * handling by calling hclge_task_schedule() in
+ /* Handle error recovery, reset and mbx again in case the periodic task
+ * delays the handling by calling hclge_task_schedule() in
* hclge_periodic_service_task().
*/
+ hclge_errhand_service_task(hdev);
hclge_reset_service_task(hdev);
hclge_mailbox_service_task(hdev);
}
@@ -4031,6 +4722,30 @@ struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
return container_of(handle, struct hclge_vport, nic);
}
+static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
+ struct hnae3_vector_info *vector_info)
+{
+#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
+
+ vector_info->vector = pci_irq_vector(hdev->pdev, idx);
+
+ /* need an extend offset to config vector >= 64 */
+ if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
+ vector_info->io_addr = hdev->hw.hw.io_base +
+ HCLGE_VECTOR_REG_BASE +
+ (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
+ else
+ vector_info->io_addr = hdev->hw.hw.io_base +
+ HCLGE_VECTOR_EXT_REG_BASE +
+ (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
+ HCLGE_VECTOR_REG_OFFSET_H +
+ (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
+ HCLGE_VECTOR_REG_OFFSET;
+
+ hdev->vector_status[idx] = hdev->vport[0].vport_id;
+ hdev->vector_irq[idx] = vector_info->vector;
+}
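hclge_get_vector_info() splits vector I/O addresses into a base region for the first 64 vectors and an extended region beyond that. The standalone sketch below models that arithmetic; the register constants are placeholders, since their real values live in hclge_main.h and are not part of this hunk:

#include <stdint.h>
#include <stdio.h>

#define MAX_VEC_DEV_V2   64u       /* HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 */
#define VEC_REG_BASE     0x20000u  /* placeholder for HCLGE_VECTOR_REG_BASE */
#define VEC_EXT_REG_BASE 0x30000u  /* placeholder for HCLGE_VECTOR_EXT_REG_BASE */
#define VEC_REG_OFFSET   0x4u      /* placeholder for HCLGE_VECTOR_REG_OFFSET */
#define VEC_REG_OFFSET_H 0x1000u   /* placeholder for HCLGE_VECTOR_REG_OFFSET_H */

static uint32_t vector_io_offset(uint16_t idx)
{
	/* vector 0 is the misc vector, so ring vectors start at idx 1 */
	if (idx - 1 < MAX_VEC_DEV_V2)
		return VEC_REG_BASE + (idx - 1) * VEC_REG_OFFSET;

	/* vectors >= 64 live in the extended region: the high part selects
	 * a 64-vector block, the low part selects the vector inside it
	 */
	return VEC_EXT_REG_BASE +
	       (idx - 1) / MAX_VEC_DEV_V2 * VEC_REG_OFFSET_H +
	       (idx - 1) % MAX_VEC_DEV_V2 * VEC_REG_OFFSET;
}

int main(void)
{
	printf("idx 1  -> 0x%x\n", (unsigned)vector_io_offset(1));   /* base region */
	printf("idx 65 -> 0x%x\n", (unsigned)vector_io_offset(65));  /* extended region */
	return 0;
}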
+
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
struct hnae3_vector_info *vector_info)
{
@@ -4038,23 +4753,16 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
struct hnae3_vector_info *vector = vector_info;
struct hclge_dev *hdev = vport->back;
int alloc = 0;
- int i, j;
+ u16 i = 0;
+ u16 j;
vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
- for (i = 1; i < hdev->num_msi; i++) {
+ while (++i < hdev->num_nic_msi) {
if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
- vector->vector = pci_irq_vector(hdev->pdev, i);
- vector->io_addr = hdev->hw.io_base +
- HCLGE_VECTOR_REG_BASE +
- (i - 1) * HCLGE_VECTOR_REG_OFFSET +
- vport->vport_id *
- HCLGE_VECTOR_VF_OFFSET;
- hdev->vector_status[i] = vport->vport_id;
- hdev->vector_irq[i] = vector->vector;
-
+ hclge_get_vector_info(hdev, i, vector);
vector++;
alloc++;
@@ -4097,188 +4805,17 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
-static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_KEY_SIZE;
-}
-
-static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_IND_TBL_SIZE;
-}
-
-static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclge_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclge_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGE_RSS_KEY_SIZE;
- req = (struct hclge_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
- req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
-{
- struct hclge_rss_indirection_table_cmd *req;
- struct hclge_desc desc;
- int i, j;
- int ret;
-
- req = (struct hclge_rss_indirection_table_cmd *)desc.data;
-
- for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
- hclge_cmd_setup_basic_desc
- (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
-
- req->start_table_index =
- cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
-
- for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
- req->rss_result[j] =
- indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure rss indir table fail,status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
- u16 *tc_size, u16 *tc_offset)
-{
- struct hclge_rss_tc_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
- int i;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
- req = (struct hclge_rss_tc_mode_cmd *)desc.data;
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss tc mode fail, status = %d\n", ret);
-
- return ret;
-}
-
-static void hclge_get_rss_type(struct hclge_vport *vport)
-{
- if (vport->rss_tuple_sets.ipv4_tcp_en ||
- vport->rss_tuple_sets.ipv4_udp_en ||
- vport->rss_tuple_sets.ipv4_sctp_en ||
- vport->rss_tuple_sets.ipv6_tcp_en ||
- vport->rss_tuple_sets.ipv6_udp_en ||
- vport->rss_tuple_sets.ipv6_sctp_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
- else if (vport->rss_tuple_sets.ipv4_fragment_en ||
- vport->rss_tuple_sets.ipv6_fragment_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
- else
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
-}
-
-static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
-{
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-
- /* Get the tuple cfg from pf */
- req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
- hclge_get_rss_type(&hdev->vport[0]);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
- int i;
-
- /* Get hash algorithm */
- if (hfunc) {
- switch (vport->rss_algo) {
- case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGE_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
+ struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
- /* Get indirect table */
- if (indir)
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
- indir[i] = vport->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
@@ -4286,67 +4823,25 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
- /* Set the RSS Hash Key if specififed by the user */
- if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- hash_algo = vport->rss_algo;
- break;
- default:
- return -EINVAL;
- }
-
- ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
- if (ret)
- return ret;
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- vport->rss_algo = hash_algo;
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+ return ret;
}
/* Update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
- vport->rss_indirection_tbl[i] = indir[i];
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
- return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
-}
-
-static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGE_D_PORT_BIT;
- else
- hash_sets &= ~HCLGE_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGE_S_IP_BIT;
- else
- hash_sets &= ~HCLGE_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGE_D_IP_BIT;
- else
- hash_sets &= ~HCLGE_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGE_V_TAG_BIT;
-
- return hash_sets;
+ return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
@@ -4354,77 +4849,17 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
- u8 tuple_sets;
int ret;
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclge_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
+ "failed to set rss tuple, ret = %d.\n", ret);
return ret;
}
- vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- hclge_get_rss_type(vport);
+ hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
return 0;
}
@@ -4433,47 +4868,16 @@ static int hclge_get_rss_tuple(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
+ int ret;
nfc->data = 0;
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
- if (!tuple_sets)
- return 0;
+ ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
+ &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
- if (tuple_sets & HCLGE_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGE_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGE_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
- if (tuple_sets & HCLGE_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -4483,108 +4887,76 @@ static int hclge_get_tc_size(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hdev->rss_size_max;
+ return hdev->pf_rss_size_max;
}
-int hclge_rss_init_hw(struct hclge_dev *hdev)
+static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct hclge_vport *vport = hdev->vport;
- u8 *rss_indir = vport[0].rss_indirection_tbl;
- u16 rss_size = vport[0].alloc_rss_size;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
- u8 *key = vport[0].rss_hash_key;
- u8 hfunc = vport[0].rss_algo;
- u16 tc_valid[HCLGE_MAX_TC_NUM];
+ struct hnae3_tc_info *tc_info;
u16 roundup_size;
- unsigned int i;
- int ret;
-
- ret = hclge_set_rss_indir_table(hdev, rss_indir);
- if (ret)
- return ret;
-
- ret = hclge_set_rss_algo_key(hdev, hfunc, key);
- if (ret)
- return ret;
-
- ret = hclge_set_rss_input_tuple(hdev);
- if (ret)
- return ret;
-
- /* Each TC have the same queue size, and tc_size set to hardware is
- * the log2 of roundup power of two of rss_size, the acutal queue
- * size is limited by indirection table.
- */
- if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
- dev_err(&hdev->pdev->dev,
- "Configure rss tc size failed, invalid TC_SIZE = %u\n",
- rss_size);
- return -EINVAL;
- }
-
- roundup_size = roundup_pow_of_two(rss_size);
- roundup_size = ilog2(roundup_size);
+ u16 rss_size;
+ int i;
+ tc_info = &vport->nic.kinfo.tc_info;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ rss_size = tc_info->tqp_count[i];
tc_valid[i] = 0;
if (!(hdev->hw_tc_map & BIT(i)))
continue;
+ /* tc_size set to hardware is the log2 of roundup power of two
+ * of rss_size, the actual queue size is limited by indirection
+ * table.
+ */
+ if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
+ rss_size == 0) {
+ dev_err(&hdev->pdev->dev,
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
+ rss_size);
+ return -EINVAL;
+ }
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
tc_valid[i] = 1;
tc_size[i] = roundup_size;
- tc_offset[i] = rss_size * i;
+ tc_offset[i] = tc_info->tqp_offset[i];
}
- return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
}
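As the comment above notes, the tc_size written to hardware is the log2 of the per-TC queue count rounded up to a power of two. A small standalone illustration of the roundup_pow_of_two() + ilog2() step (the values are examples only):

#include <stdint.h>
#include <stdio.h>

static uint16_t tc_size_from_rss_size(uint16_t rss_size)
{
	uint16_t roundup = 1;
	uint16_t log2 = 0;

	/* equivalent of roundup_pow_of_two() followed by ilog2() */
	while (roundup < rss_size) {
		roundup <<= 1;
		log2++;
	}
	return log2;
}

int main(void)
{
	/* e.g. 10 queues in a TC -> rounded up to 16 -> tc_size = 4 */
	printf("rss_size 10 -> tc_size %u\n", (unsigned)tc_size_from_rss_size(10));
	printf("rss_size 16 -> tc_size %u\n", (unsigned)tc_size_from_rss_size(16));
	return 0;
}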
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = hdev->vport;
- int i, j;
-
- for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
- vport[j].rss_indirection_tbl[i] =
- i % vport[j].alloc_rss_size;
- }
-}
-
-static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+int hclge_rss_init_hw(struct hclge_dev *hdev)
{
- int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- struct hclge_vport *vport = hdev->vport;
-
- if (hdev->pdev->revision >= 0x21)
- rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
+ u8 *key = hdev->rss_cfg.rss_hash_key;
+ u8 hfunc = hdev->rss_cfg.rss_algo;
+ int ret;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- vport[i].rss_tuple_sets.ipv4_tcp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv4_udp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv4_sctp_en =
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport[i].rss_tuple_sets.ipv4_fragment_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv6_tcp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv6_udp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv6_sctp_en =
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport[i].rss_tuple_sets.ipv6_fragment_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_indir);
+ if (ret)
+ return ret;
- vport[i].rss_algo = rss_algo;
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
+ if (ret)
+ return ret;
- memcpy(vport[i].rss_hash_key, hclge_hash_key,
- HCLGE_RSS_KEY_SIZE);
- }
+ ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
+ &hdev->hw.hw, true,
+ &hdev->rss_cfg);
+ if (ret)
+ return ret;
- hclge_rss_indir_init_cfg(hdev);
+ return hclge_init_rss_tc_mode(hdev);
}
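The default indirection-table contents are now owned by the shared hclge_comm RSS code instead of the removed per-vport helper. Assuming that code keeps the same spreading as the old hclge_rss_indir_init_cfg() (entry i maps to queue i % rss_size), a minimal sketch of that fill looks like this; the table size here is an example, the real one comes from dev_specs.rss_ind_tbl_size:

#include <stdint.h>
#include <stdio.h>

#define IND_TBL_SIZE 512u   /* example value only */

int main(void)
{
	uint16_t indir[IND_TBL_SIZE];
	uint16_t rss_size = 8;  /* example per-function queue count */
	unsigned int i;

	/* spread table entries evenly across the rss_size queues */
	for (i = 0; i < IND_TBL_SIZE; i++)
		indir[i] = i % rss_size;

	printf("indir[0]=%u indir[7]=%u indir[8]=%u\n",
	       (unsigned)indir[0], (unsigned)indir[7], (unsigned)indir[8]);
	return 0;
}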
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
@@ -4596,14 +4968,19 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_desc desc;
struct hclge_ctrl_vector_chain_cmd *req =
(struct hclge_ctrl_vector_chain_cmd *)desc.data;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
int i;
op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
hclge_cmd_setup_basic_desc(&desc, op, false);
- req->int_vector_id = vector_id;
+ req->int_vector_id_l = hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_L_M,
+ HCLGE_VECTOR_ID_L_S);
+ req->int_vector_id_h = hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_H_M,
+ HCLGE_VECTOR_ID_H_S);
i = 0;
for (node = ring_chain; node; node = node->next) {
@@ -4635,7 +5012,14 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc,
op,
false);
- req->int_vector_id = vector_id;
+ req->int_vector_id_l =
+ hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_L_M,
+ HCLGE_VECTOR_ID_L_S);
+ req->int_vector_id_h =
+ hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_H_M,
+ HCLGE_VECTOR_ID_H_S);
}
}
@@ -4696,79 +5080,362 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
return ret;
}
-static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param)
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
+ bool en_uc, bool en_mc, bool en_bc)
{
+ struct hclge_vport *vport = &hdev->vport[vf_id];
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
+ bool uc_tx_en = en_uc;
+ u8 promisc_cfg = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
req = (struct hclge_promisc_cfg_cmd *)desc.data;
- req->vf_id = param->vf_id;
+ req->vf_id = vf_id;
- /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
- * pdev revision(0x20), new revision support them. The
- * value of this two fields will not return error when driver
- * send command to fireware in revision(0x20).
- */
- req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
- HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
+ if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
+ uc_tx_en = false;
+
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
+ req->extend_promisc = promisc_cfg;
+
+ /* to be compatible with DEVICE_VERSION_V1/2 */
+ promisc_cfg = 0;
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
+ req->promisc = promisc_cfg;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
- "Set promisc mode fail, status is %d.\n", ret);
+ "failed to set vport %u promisc mode, ret = %d.\n",
+ vf_id, ret);
return ret;
}
-static void hclge_promisc_param_init(struct hclge_promisc_param *param,
- bool en_uc, bool en_mc, bool en_bc,
- int vport_id)
-{
- if (!param)
- return;
-
- memset(param, 0, sizeof(struct hclge_promisc_param));
- if (en_uc)
- param->enable = HCLGE_PROMISC_EN_UC;
- if (en_mc)
- param->enable |= HCLGE_PROMISC_EN_MC;
- if (en_bc)
- param->enable |= HCLGE_PROMISC_EN_BC;
- param->vf_id = vport_id;
-}
-
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc)
{
- struct hclge_dev *hdev = vport->back;
- struct hclge_promisc_param param;
-
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
- vport->vport_id);
- return hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
+ en_uc_pmc, en_mc_pmc, en_bc_pmc);
}
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
bool en_bc_pmc = true;
- /* For revision 0x20, if broadcast promisc enabled, vlan filter is
- * always bypassed. So broadcast promisc should be disabled until
- * user enable promisc mode
+ /* For devices whose version is below V2, if broadcast promisc is
+ * enabled, the vlan filter is always bypassed. So broadcast promisc
+ * should be disabled until the user enables promisc mode
*/
- if (handle->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
en_bc_pmc);
}
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+}
+
+static void hclge_sync_fd_state(struct hclge_dev *hdev)
+{
+ if (hlist_empty(&hdev->fd_rule_list))
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+}
+
+static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
+{
+ if (!test_bit(location, hdev->fd_bmap)) {
+ set_bit(location, hdev->fd_bmap);
+ hdev->hclge_fd_rule_num++;
+ }
+}
+
+static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
+{
+ if (test_bit(location, hdev->fd_bmap)) {
+ clear_bit(location, hdev->fd_bmap);
+ hdev->hclge_fd_rule_num--;
+ }
+}
+
+static void hclge_fd_free_node(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hclge_sync_fd_state(hdev);
+}
+
+static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
+ struct hclge_fd_rule *old_rule,
+ struct hclge_fd_rule *new_rule,
+ enum HCLGE_FD_NODE_STATE state)
+{
+ switch (state) {
+ case HCLGE_FD_TO_ADD:
+ case HCLGE_FD_ACTIVE:
+ /* 1) if the new state is TO_ADD, just replace the old rule
+ * with the same location, no matter its state, because the
+ * new rule will be configured to the hardware.
+ * 2) if the new state is ACTIVE, it means the new rule
+ * has been configured to the hardware, so just replace
+ * the old rule node with the same location.
+ * 3) in both cases no new node is added to the list, so it's
+ * unnecessary to update the rule number and fd_bmap.
+ */
+ new_rule->rule_node.next = old_rule->rule_node.next;
+ new_rule->rule_node.pprev = old_rule->rule_node.pprev;
+ memcpy(old_rule, new_rule, sizeof(*old_rule));
+ kfree(new_rule);
+ break;
+ case HCLGE_FD_DELETED:
+ hclge_fd_dec_rule_cnt(hdev, old_rule->location);
+ hclge_fd_free_node(hdev, old_rule);
+ break;
+ case HCLGE_FD_TO_DEL:
+ /* if the new request is TO_DEL and the old rule exists:
+ * 1) the state of the old rule is TO_DEL, we need to do nothing,
+ * because rules are deleted by location and the other rule
+ * content is unnecessary.
+ * 2) the state of the old rule is ACTIVE, we need to change its
+ * state to TO_DEL, so the rule will be deleted when the periodic
+ * task is scheduled.
+ * 3) the state of the old rule is TO_ADD, which means the rule
+ * hasn't been added to hardware yet, so we just delete the rule
+ * node from fd_rule_list directly.
+ */
+ if (old_rule->state == HCLGE_FD_TO_ADD) {
+ hclge_fd_dec_rule_cnt(hdev, old_rule->location);
+ hclge_fd_free_node(hdev, old_rule);
+ return;
+ }
+ old_rule->state = HCLGE_FD_TO_DEL;
+ break;
+ }
+}
+
+static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
+ u16 location,
+ struct hclge_fd_rule **parent)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ /* record the parent node, used to keep the nodes in fd_rule_list
+ * in ascending order.
+ */
+ *parent = rule;
+ }
+
+ return NULL;
+}
+
+/* insert fd rule node in ascending order according to rule->location */
+static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
+ struct hclge_fd_rule *rule,
+ struct hclge_fd_rule *parent)
+{
+ INIT_HLIST_NODE(&rule->rule_node);
+
+ if (parent)
+ hlist_add_behind(&rule->rule_node, &parent->rule_node);
+ else
+ hlist_add_head(&rule->rule_node, hlist);
+}
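hclge_find_fd_rule() and hclge_fd_insert_rule_node() together keep fd_rule_list sorted by location: the lookup remembers the last node whose location is smaller than the target, and the insert places the new node right behind that parent (or at the head when there is none). A simplified userspace model using a plain singly linked list instead of the driver's hlist:

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned int location;
	struct rule *next;
};

static struct rule *find_rule(struct rule *head, unsigned int location,
			      struct rule **parent)
{
	struct rule *r;

	for (r = head; r; r = r->next) {
		if (r->location == location)
			return r;       /* exact match: caller replaces it */
		if (r->location > location)
			return NULL;    /* passed the slot: insert after *parent */
		*parent = r;            /* last node smaller than location */
	}
	return NULL;
}

static struct rule *insert_rule(struct rule *head, struct rule *new_rule,
				struct rule *parent)
{
	if (parent) {
		new_rule->next = parent->next;
		parent->next = new_rule;
		return head;
	}
	new_rule->next = head;
	return new_rule;                /* new head */
}

int main(void)
{
	unsigned int locs[] = { 5, 1, 3 };
	struct rule *head = NULL, *r;
	size_t i;

	for (i = 0; i < sizeof(locs) / sizeof(locs[0]); i++) {
		struct rule *parent = NULL;
		struct rule *n = calloc(1, sizeof(*n));

		n->location = locs[i];
		find_rule(head, n->location, &parent);
		head = insert_rule(head, n, parent);
	}
	for (r = head; r; r = r->next)
		printf("%u ", r->location);  /* prints: 1 3 5 */
	printf("\n");
	return 0;
}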
+
+static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
+ struct hclge_fd_user_def_cfg *cfg)
+{
+ struct hclge_fd_user_def_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
+
+ req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
+
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
+ req->ol2_cfg = cpu_to_le16(data);
+
+ data = 0;
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
+ req->ol3_cfg = cpu_to_le16(data);
+
+ data = 0;
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
+ req->ol4_cfg = cpu_to_le16(data);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set fd user def data, ret= %d\n", ret);
+ return ret;
+}
+
+static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
+{
+ int ret;
+
+ if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
+ return;
+
+ if (!locked)
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
+ if (ret)
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+
+ if (!locked)
+ spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hlist_head *hlist = &hdev->fd_rule_list;
+ struct hclge_fd_rule *fd_rule, *parent = NULL;
+ struct hclge_fd_user_def_info *info, *old_info;
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return 0;
+
+ /* valid layer starts from 1, so minus 1 is needed to get the cfg */
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ info = &rule->ep.user_def;
+
+ if (!cfg->ref_cnt || cfg->offset == info->offset)
+ return 0;
+
+ if (cfg->ref_cnt > 1)
+ goto error;
+
+ fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
+ if (fd_rule) {
+ old_info = &fd_rule->ep.user_def;
+ if (info->layer == old_info->layer)
+ return 0;
+ }
+
+error:
+ dev_err(&hdev->pdev->dev,
+ "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
+ info->layer + 1);
+ return -ENOSPC;
+}
+
+static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return;
+
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ if (!cfg->ref_cnt) {
+ cfg->offset = rule->ep.user_def.offset;
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ }
+ cfg->ref_cnt++;
+}
+
+static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return;
+
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ if (!cfg->ref_cnt)
+ return;
+
+ cfg->ref_cnt--;
+ if (!cfg->ref_cnt) {
+ cfg->offset = 0;
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ }
+}
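The two helpers above implement a small refcount scheme for the per-layer user-def offset: the first rule on a layer claims the offset and marks the hardware config dirty, later rules only bump the count, and releasing the last rule frees the offset and marks the config dirty again. A standalone model of that bookkeeping, with a plain flag standing in for HCLGE_STATE_FD_USER_DEF_CHANGED:

#include <stdbool.h>
#include <stdio.h>

struct user_def_cfg {
	unsigned int ref_cnt;
	unsigned int offset;
};

static bool cfg_dirty;

static void user_def_get(struct user_def_cfg *cfg, unsigned int offset)
{
	if (!cfg->ref_cnt) {
		cfg->offset = offset;   /* first user claims the offset */
		cfg_dirty = true;
	}
	cfg->ref_cnt++;
}

static void user_def_put(struct user_def_cfg *cfg)
{
	if (!cfg->ref_cnt)
		return;
	if (!--cfg->ref_cnt) {
		cfg->offset = 0;        /* last user releases the offset */
		cfg_dirty = true;
	}
}

int main(void)
{
	struct user_def_cfg cfg = { 0 };

	user_def_get(&cfg, 12);   /* first rule: claims offset 12, dirty */
	user_def_get(&cfg, 12);   /* second rule: only bumps the refcount */
	user_def_put(&cfg);
	user_def_put(&cfg);       /* last rule: releases the offset, dirty */
	printf("ref_cnt=%u offset=%u dirty=%d\n",
	       cfg.ref_cnt, cfg.offset, cfg_dirty);
	return 0;
}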
+
+static void hclge_update_fd_list(struct hclge_dev *hdev,
+ enum HCLGE_FD_NODE_STATE state, u16 location,
+ struct hclge_fd_rule *new_rule)
+{
+ struct hlist_head *hlist = &hdev->fd_rule_list;
+ struct hclge_fd_rule *fd_rule, *parent = NULL;
+
+ fd_rule = hclge_find_fd_rule(hlist, location, &parent);
+ if (fd_rule) {
+ hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
+ if (state == HCLGE_FD_ACTIVE)
+ hclge_fd_inc_user_def_refcnt(hdev, new_rule);
+ hclge_sync_fd_user_def_cfg(hdev, true);
+
+ hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
+ return;
+ }
+
+ /* it's unlikely to fail here, because we have checked whether the
+ * rule exists before.
+ */
+ if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to delete fd rule %u, it's inexistent\n",
+ location);
+ return;
+ }
+
+ hclge_fd_inc_user_def_refcnt(hdev, new_rule);
+ hclge_sync_fd_user_def_cfg(hdev, true);
+
+ hclge_fd_insert_rule_node(hlist, new_rule, parent);
+ hclge_fd_inc_rule_cnt(hdev, new_rule->location);
+
+ if (state == HCLGE_FD_TO_ADD) {
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+ }
+}
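hclge_update_fd_list() and hclge_update_fd_rule_node() drive a small per-location state machine: new rules enter as TO_ADD and are pushed to hardware by the periodic task, deleting a rule that never reached hardware drops it immediately, and deleting an ACTIVE rule defers the hardware removal via TO_DEL. A much-simplified, single-slot model of those transitions, with hardware writes replaced by printf():

#include <stdio.h>

enum fd_state { FD_TO_ADD, FD_ACTIVE, FD_TO_DEL, FD_DELETED };

static enum fd_state state = FD_DELETED;   /* no rule installed yet */

static void request(enum fd_state new_state)
{
	switch (new_state) {
	case FD_TO_ADD:
	case FD_ACTIVE:
		/* replace whatever is there; the periodic task syncs TO_ADD */
		state = new_state;
		break;
	case FD_TO_DEL:
		if (state == FD_TO_ADD)
			state = FD_DELETED;   /* never reached hardware */
		else if (state == FD_ACTIVE)
			state = FD_TO_DEL;    /* periodic task will remove it */
		break;
	case FD_DELETED:
		state = FD_DELETED;
		break;
	}
	printf("state=%d\n", state);
}

int main(void)
{
	request(FD_TO_ADD);   /* user adds a rule: queued for hardware */
	request(FD_TO_DEL);   /* deleted before sync: dropped immediately */
	request(FD_TO_ADD);
	request(FD_ACTIVE);   /* periodic task pushed it to hardware */
	request(FD_TO_DEL);   /* now deletion must go through hardware */
	return 0;
}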
+
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
struct hclge_get_fd_mode_cmd *req;
@@ -4819,7 +5486,8 @@ static int hclge_get_fd_allocation(struct hclge_dev *hdev,
return ret;
}
-static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
+ enum HCLGE_FD_STAGE stage_num)
{
struct hclge_set_fd_key_config_cmd *req;
struct hclge_fd_key_cfg *stage;
@@ -4846,13 +5514,24 @@ static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
return ret;
}
+static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
+{
+ struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ hclge_fd_set_user_def_cmd(hdev, cfg);
+}
+
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS 0x03
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -4873,11 +5552,8 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
return -EOPNOTSUPP;
}
- hdev->fd_cfg.proto_support =
- TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
- UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
- key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
key_cfg->outer_sipv6_word_en = 0;
@@ -4889,10 +5565,11 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If use max 400bit key, we can support tuples for ether type */
- if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
- hdev->fd_cfg.proto_support |= ETHER_FLOW;
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
key_cfg->tuple_active |=
BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
}
/* roce_type is used to filter roce frames
@@ -4921,9 +5598,9 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -4956,6 +5633,7 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
struct hclge_fd_ad_data *action)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_fd_ad_config_cmd *req;
struct hclge_desc desc;
u64 ad_data = 0;
@@ -4971,6 +5649,12 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
action->write_rule_id_to_bd);
hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
action->rule_id);
+ if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
+ action->override_tc);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
+ HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
+ }
ad_data <<= 32;
hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
@@ -4995,98 +5679,57 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
struct hclge_fd_rule *rule)
{
+ int offset, moffset, ip_offset;
+ enum HCLGE_FD_KEY_OPT key_opt;
u16 tmp_x_s, tmp_y_s;
u32 tmp_x_l, tmp_y_l;
+ u8 *p = (u8 *)rule;
int i;
- if (rule->unused_tuple & tuple_bit)
+ if (rule->unused_tuple & BIT(tuple_bit))
return true;
- switch (tuple_bit) {
- case 0:
- return false;
- case BIT(INNER_DST_MAC):
- for (i = 0; i < ETH_ALEN; i++) {
- calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
- rule->tuples_mask.dst_mac[i]);
- calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
- rule->tuples_mask.dst_mac[i]);
- }
+ key_opt = tuple_key_info[tuple_bit].key_opt;
+ offset = tuple_key_info[tuple_bit].offset;
+ moffset = tuple_key_info[tuple_bit].moffset;
- return true;
- case BIT(INNER_SRC_MAC):
- for (i = 0; i < ETH_ALEN; i++) {
- calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
- rule->tuples.src_mac[i]);
- calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
- rule->tuples.src_mac[i]);
- }
+ switch (key_opt) {
+ case KEY_OPT_U8:
+ calc_x(*key_x, p[offset], p[moffset]);
+ calc_y(*key_y, p[offset], p[moffset]);
return true;
- case BIT(INNER_VLAN_TAG_FST):
- calc_x(tmp_x_s, rule->tuples.vlan_tag1,
- rule->tuples_mask.vlan_tag1);
- calc_y(tmp_y_s, rule->tuples.vlan_tag1,
- rule->tuples_mask.vlan_tag1);
+ case KEY_OPT_LE16:
+ calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+ calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
return true;
- case BIT(INNER_ETH_TYPE):
- calc_x(tmp_x_s, rule->tuples.ether_proto,
- rule->tuples_mask.ether_proto);
- calc_y(tmp_y_s, rule->tuples.ether_proto,
- rule->tuples_mask.ether_proto);
- *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
- *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
-
- return true;
- case BIT(INNER_IP_TOS):
- calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
- calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-
- return true;
- case BIT(INNER_IP_PROTO):
- calc_x(*key_x, rule->tuples.ip_proto,
- rule->tuples_mask.ip_proto);
- calc_y(*key_y, rule->tuples.ip_proto,
- rule->tuples_mask.ip_proto);
-
- return true;
- case BIT(INNER_SRC_IP):
- calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
- rule->tuples_mask.src_ip[IPV4_INDEX]);
- calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
- rule->tuples_mask.src_ip[IPV4_INDEX]);
+ case KEY_OPT_LE32:
+ calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+ calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
return true;
- case BIT(INNER_DST_IP):
- calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
- rule->tuples_mask.dst_ip[IPV4_INDEX]);
- calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
- rule->tuples_mask.dst_ip[IPV4_INDEX]);
- *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
- *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
-
- return true;
- case BIT(INNER_SRC_PORT):
- calc_x(tmp_x_s, rule->tuples.src_port,
- rule->tuples_mask.src_port);
- calc_y(tmp_y_s, rule->tuples.src_port,
- rule->tuples_mask.src_port);
- *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
- *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+ case KEY_OPT_MAC:
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
+ p[moffset + i]);
+ calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
+ p[moffset + i]);
+ }
return true;
- case BIT(INNER_DST_PORT):
- calc_x(tmp_x_s, rule->tuples.dst_port,
- rule->tuples_mask.dst_port);
- calc_y(tmp_y_s, rule->tuples.dst_port,
- rule->tuples_mask.dst_port);
- *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
- *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+ case KEY_OPT_IP:
+ ip_offset = IPV4_INDEX * sizeof(u32);
+ calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
+ *(u32 *)(&p[moffset + ip_offset]));
+ calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
+ *(u32 *)(&p[moffset + ip_offset]));
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
return true;
default:
@@ -5162,23 +5805,24 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
- unsigned int i;
- int ret, tuple_size;
u8 meta_data_region;
+ u8 tuple_size;
+ int ret;
+ u32 i;
memset(key_x, 0, sizeof(key_x));
memset(key_y, 0, sizeof(key_y));
cur_key_x = key_x;
cur_key_y = key_y;
- for (i = 0 ; i < MAX_TUPLE; i++) {
+ for (i = 0; i < MAX_TUPLE; i++) {
bool tuple_valid;
- u32 check_tuple;
tuple_size = tuple_key_info[i].key_length / 8;
- check_tuple = key_cfg->tuple_active & BIT(i);
+ if (!(key_cfg->tuple_active & BIT(i)))
+ continue;
- tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+ tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
cur_key_y, rule);
if (tuple_valid) {
cur_key_x += tuple_size;
@@ -5215,22 +5859,34 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_rule *rule)
{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_fd_ad_data ad_data;
+ memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
ad_data.ad_id = rule->location;
if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
ad_data.drop_packet = true;
- ad_data.forward_to_direct_queue = false;
- ad_data.queue_id = 0;
+ } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
+ ad_data.override_tc = true;
+ ad_data.queue_id =
+ kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
+ ad_data.tc_size =
+ ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
} else {
- ad_data.drop_packet = false;
ad_data.forward_to_direct_queue = true;
ad_data.queue_id = rule->queue_id;
}
- ad_data.use_counter = false;
- ad_data.counter_id = 0;
+ if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
+ ad_data.use_counter = true;
+ ad_data.counter_id = rule->vf_id %
+ hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
+ } else {
+ ad_data.use_counter = false;
+ ad_data.counter_id = 0;
+ }
ad_data.use_next_stage = false;
ad_data.next_input_key = 0;
@@ -5241,383 +5897,525 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
-static int hclge_fd_check_spec(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs, u32 *unused)
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
+ u32 *unused_tuple)
{
- struct ethtool_tcpip4_spec *tcp_ip4_spec;
- struct ethtool_usrip4_spec *usr_ip4_spec;
- struct ethtool_tcpip6_spec *tcp_ip6_spec;
- struct ethtool_usrip6_spec *usr_ip6_spec;
- struct ethhdr *ether_spec;
-
- if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ if (!spec || !unused_tuple)
return -EINVAL;
- if (!(fs->flow_type & hdev->fd_cfg.proto_support))
- return -EOPNOTSUPP;
-
- if ((fs->flow_type & FLOW_EXT) &&
- (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
- dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
- return -EOPNOTSUPP;
- }
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
- switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
- case SCTP_V4_FLOW:
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!tcp_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!tcp_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- if (!tcp_ip4_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!tcp_ip4_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (!tcp_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ return 0;
+}
- break;
- case IP_USER_FLOW:
- usr_ip4_spec = &fs->h_u.usr_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!usr_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- if (!usr_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!usr_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!usr_ip4_spec->proto)
- *unused |= BIT(INNER_IP_PROTO);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (usr_ip4_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ if (!spec->proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
- return -EOPNOTSUPP;
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
- break;
- case SCTP_V6_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS);
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
- /* check whether src/dst ip address used */
- if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
- !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ return 0;
+}
- if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
- !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!tcp_ip6_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
- if (!tcp_ip6_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ /* check whether src/dst ip address used */
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (tcp_ip6_spec->tclass)
- return -EOPNOTSUPP;
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
+ *unused_tuple |= BIT(INNER_DST_IP);
- break;
- case IPV6_USER_FLOW:
- usr_ip6_spec = &fs->h_u.usr_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
- BIT(INNER_DST_PORT);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- /* check whether src/dst ip address used */
- if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
- !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
- !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->tclass)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (!usr_ip6_spec->l4_proto)
- *unused |= BIT(INNER_IP_PROTO);
+ return 0;
+}
- if (usr_ip6_spec->tclass)
- return -EOPNOTSUPP;
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (usr_ip6_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- break;
- case ETHER_FLOW:
- ether_spec = &fs->h_u.ether_spec;
- *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
- BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+ /* check whether src/dst ip address used */
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (is_zero_ether_addr(ether_spec->h_source))
- *unused |= BIT(INNER_SRC_MAC);
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (is_zero_ether_addr(ether_spec->h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ if (!spec->l4_proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (!ether_spec->h_proto)
- *unused |= BIT(INNER_ETH_TYPE);
+ if (!spec->tclass)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- break;
- default:
+ if (spec->l4_4_bytes)
return -EOPNOTSUPP;
- }
- if ((fs->flow_type & FLOW_EXT)) {
- if (fs->h_ext.vlan_etype)
+ return 0;
+}
+
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(spec->h_source))
+ *unused_tuple |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(spec->h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+
+ if (!spec->h_proto)
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
+
+ return 0;
+}
+
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->h_ext.vlan_etype) {
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
return -EOPNOTSUPP;
+ }
+
if (!fs->h_ext.vlan_tci)
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
- if (fs->m_ext.vlan_tci) {
- if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
+ if (fs->m_ext.vlan_tci &&
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
+ return -EINVAL;
}
} else {
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
}
if (fs->flow_type & FLOW_MAC_EXT) {
- if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
return -EOPNOTSUPP;
+ }
if (is_zero_ether_addr(fs->h_ext.h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ *unused_tuple |= BIT(INNER_DST_MAC);
else
- *unused &= ~(BIT(INNER_DST_MAC));
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
}
return 0;
}
-static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
+static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
{
- struct hclge_fd_rule *rule = NULL;
- struct hlist_node *node2;
-
- spin_lock_bh(&hdev->fd_rule_lock);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= location)
- break;
+ switch (flow_type) {
+ case ETHER_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L2;
+ *unused_tuple &= ~BIT(INNER_L2_RSV);
+ break;
+ case IP_USER_FLOW:
+ case IPV6_USER_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L3;
+ *unused_tuple &= ~BIT(INNER_L3_RSV);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L4;
+ *unused_tuple &= ~BIT(INNER_L4_RSV);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- spin_unlock_bh(&hdev->fd_rule_lock);
+ return 0;
+}
- return rule && rule->location == location;
+static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
+{
+ return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
}
-/* make sure being called after lock up with fd_rule_lock */
-static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
- struct hclge_fd_rule *new_rule,
- u16 location,
- bool is_add)
+static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
{
- struct hclge_fd_rule *rule = NULL, *parent = NULL;
- struct hlist_node *node2;
+ u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ u16 data, offset, data_mask, offset_mask;
+ int ret;
- if (is_add && !new_rule)
- return -EINVAL;
+ info->layer = HCLGE_FD_USER_DEF_NONE;
+ *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
- hlist_for_each_entry_safe(rule, node2,
- &hdev->fd_rule_list, rule_node) {
- if (rule->location >= location)
- break;
- parent = rule;
- }
+ if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
+ return 0;
- if (rule && rule->location == location) {
- hlist_del(&rule->rule_node);
- kfree(rule);
- hdev->hclge_fd_rule_num--;
+ /* user-def data from ethtool is 64 bit value, the bit0~15 is used
+ * for data, and bit32~47 is used for offset.
+ */
+ data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
+ data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
+ offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
+ offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
- if (!is_add) {
- if (!hdev->hclge_fd_rule_num)
- hdev->fd_active_type = HCLGE_FD_RULE_NONE;
- clear_bit(location, hdev->fd_bmap);
+ if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
- return 0;
- }
- } else if (!is_add) {
+ if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
dev_err(&hdev->pdev->dev,
- "delete fail, rule %u is inexistent\n",
- location);
+ "user-def offset[%u] should be no more than %u\n",
+ offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
return -EINVAL;
}
- INIT_HLIST_NODE(&new_rule->rule_node);
+ if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
+ dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
+ return -EINVAL;
+ }
- if (parent)
- hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
- else
- hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+ ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "unsupported flow type for user-def bytes, ret = %d\n",
+ ret);
+ return ret;
+ }
- set_bit(location, hdev->fd_bmap);
- hdev->hclge_fd_rule_num++;
- hdev->fd_active_type = new_rule->rule_type;
+ info->data = data;
+ info->data_mask = data_mask;
+ info->offset = offset;
return 0;
}
-static int hclge_fd_get_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
- struct hclge_fd_rule *rule)
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
{
- u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ u32 flow_type;
+ int ret;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config fd rules, invalid rule location: %u, max is %u\n.",
+ fs->location,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
+ if (ret)
+ return ret;
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
switch (flow_type) {
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- rule->tuples.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
+ unused_tuple);
+ break;
+ case IP_USER_FLOW:
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
+ unused_tuple);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
+ unused_tuple);
+ break;
+ case IPV6_USER_FLOW:
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
+ unused_tuple);
+ break;
+ case ETHER_FLOW:
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "ETHER_FLOW is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
- rule->tuples.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
+ unused_tuple);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported protocol type, protocol type = %#x\n",
+ flow_type);
+ return -EOPNOTSUPP;
+ }
- rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
- rule->tuples_mask.src_port =
- be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check flow union tuple, ret = %d\n",
+ ret);
+ return ret;
+ }
- rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
- rule->tuples_mask.dst_port =
- be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
+}
- rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
- rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule, u8 ip_proto)
+{
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
- rule->tuples.ether_proto = ETH_P_IP;
- rule->tuples_mask.ether_proto = 0xFFFF;
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
- break;
- case IP_USER_FLOW:
- rule->tuples.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+ rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
- rule->tuples.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+ rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
- rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
- rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
- rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
- rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
- rule->tuples.ether_proto = ETH_P_IP;
- rule->tuples_mask.ether_proto = 0xFFFF;
+ rule->tuples.ip_proto = ip_proto;
+ rule->tuples_mask.ip_proto = 0xFF;
+}
- break;
- case SCTP_V6_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
+static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
- be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
- rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
- rule->tuples_mask.src_port =
- be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
- rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
- rule->tuples_mask.dst_port =
- be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
- rule->tuples.ether_proto = ETH_P_IPV6;
- rule->tuples_mask.ether_proto = 0xFFFF;
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+}
- break;
- case IPV6_USER_FLOW:
- be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
+static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule, u8 ip_proto)
+{
+ be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
+ IPV6_SIZE);
- be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
+ IPV6_SIZE);
- rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
- rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+ rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
- rule->tuples.ether_proto = ETH_P_IPV6;
- rule->tuples_mask.ether_proto = 0xFFFF;
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+ rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
- break;
- case ETHER_FLOW:
- ether_addr_copy(rule->tuples.src_mac,
- fs->h_u.ether_spec.h_source);
- ether_addr_copy(rule->tuples_mask.src_mac,
- fs->m_u.ether_spec.h_source);
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
- ether_addr_copy(rule->tuples.dst_mac,
- fs->h_u.ether_spec.h_dest);
- ether_addr_copy(rule->tuples_mask.dst_mac,
- fs->m_u.ether_spec.h_dest);
+ rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
- rule->tuples.ether_proto =
- be16_to_cpu(fs->h_u.ether_spec.h_proto);
- rule->tuples_mask.ether_proto =
- be16_to_cpu(fs->m_u.ether_spec.h_proto);
+ rule->tuples.ip_proto = ip_proto;
+ rule->tuples_mask.ip_proto = 0xFF;
+}
+
+static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
+ IPV6_SIZE);
+
+ be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
+ IPV6_SIZE);
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+ rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+}
+
+static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
+ ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
+
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
+
+ rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
+ rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
+}
+
+static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
+ struct hclge_fd_rule *rule)
+{
+ switch (info->layer) {
+ case HCLGE_FD_USER_DEF_L2:
+ rule->tuples.l2_user_def = info->data;
+ rule->tuples_mask.l2_user_def = info->data_mask;
+ break;
+ case HCLGE_FD_USER_DEF_L3:
+ rule->tuples.l3_user_def = info->data;
+ rule->tuples_mask.l3_user_def = info->data_mask;
+ break;
+ case HCLGE_FD_USER_DEF_L4:
+ rule->tuples.l4_user_def = (u32)info->data << 16;
+ rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
break;
default:
- return -EOPNOTSUPP;
+ break;
}
+ rule->ep.user_def = *info;
+}
+
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule,
+ struct hclge_fd_user_def_info *info)
+{
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
switch (flow_type) {
case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- rule->tuples.ip_proto = IPPROTO_SCTP;
- rule->tuples_mask.ip_proto = 0xFF;
+ hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
break;
case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- rule->tuples.ip_proto = IPPROTO_TCP;
- rule->tuples_mask.ip_proto = 0xFF;
+ hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
break;
case UDP_V4_FLOW:
+ hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
+ break;
+ case IP_USER_FLOW:
+ hclge_fd_get_ip4_tuple(hdev, fs, rule);
+ break;
+ case SCTP_V6_FLOW:
+ hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
+ break;
+ case TCP_V6_FLOW:
+ hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
+ break;
case UDP_V6_FLOW:
- rule->tuples.ip_proto = IPPROTO_UDP;
- rule->tuples_mask.ip_proto = 0xFF;
+ hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
break;
- default:
+ case IPV6_USER_FLOW:
+ hclge_fd_get_ip6_tuple(hdev, fs, rule);
+ break;
+ case ETHER_FLOW:
+ hclge_fd_get_ether_tuple(hdev, fs, rule);
break;
+ default:
+ return -EOPNOTSUPP;
}
- if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->flow_type & FLOW_EXT) {
rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+ hclge_fd_get_user_def_tuple(info, rule);
}
if (fs->flow_type & FLOW_MAC_EXT) {
@@ -5628,105 +6426,150 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
return 0;
}
-/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
int ret;
- if (!rule) {
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ return ret;
+
+ return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+}
+
+static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ if (hdev->fd_active_type != rule->rule_type &&
+ (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
+ hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
dev_err(&hdev->pdev->dev,
- "The flow director rule is NULL\n");
+ "mode conflict(new type %d, active type %d), please delete existent rules first\n",
+ rule->rule_type, hdev->fd_active_type);
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EINVAL;
}
- /* it will never fail here, so needn't to check return value */
- hclge_fd_update_rule_list(hdev, rule, rule->location, true);
+ ret = hclge_fd_check_user_def_refcnt(hdev, rule);
+ if (ret)
+ goto out;
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ ret = hclge_clear_arfs_rules(hdev);
if (ret)
- goto clear_rule;
+ goto out;
- ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ ret = hclge_fd_config_rule(hdev, rule);
if (ret)
- goto clear_rule;
+ goto out;
- return 0;
+ rule->state = HCLGE_FD_ACTIVE;
+ hdev->fd_active_type = rule->rule_type;
+ hclge_update_fd_list(hdev, rule->state, rule->location, rule);
-clear_rule:
- hclge_fd_update_rule_list(hdev, rule, rule->location, false);
+out:
+ spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
}
-static int hclge_add_fd_entry(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd)
+static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u16 dst_vport_id = 0, q_index = 0;
- struct ethtool_rx_flow_spec *fs;
- struct hclge_fd_rule *rule;
- u32 unused = 0;
- u8 action;
- int ret;
-
- if (!hnae3_dev_fd_supported(hdev))
- return -EOPNOTSUPP;
-
- if (!hdev->fd_en) {
- dev_warn(&hdev->pdev->dev,
- "Please enable flow director first\n");
- return -EOPNOTSUPP;
- }
- fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
+}
- ret = hclge_fd_check_spec(hdev, fs, &unused);
- if (ret) {
- dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
- return ret;
- }
+static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
+ u16 *vport_id, u8 *action, u16 *queue_id)
+{
+ struct hclge_vport *vport = hdev->vport;
- if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
- action = HCLGE_FD_ACTION_DROP_PACKET;
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ *action = HCLGE_FD_ACTION_DROP_PACKET;
} else {
- u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
- u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
u16 tqps;
+ /* To keep consistent with the user's configuration, subtract 1 when
+ * printing 'vf', because the vf id from ethtool is increased by 1 for vfs.
+ */
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%u) > max vf num (%u)\n",
- vf, hdev->num_req_vfs);
+ "Error: vf id (%u) should be less than %u\n",
+ vf - 1U, hdev->num_req_vfs);
return -EINVAL;
}
- dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
- tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
+ *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+ tqps = hdev->vport[vf].nic.kinfo.num_tqps;
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
- ring, tqps - 1);
+ ring, tqps - 1U);
return -EINVAL;
}
- action = HCLGE_FD_ACTION_ACCEPT_PACKET;
- q_index = ring;
+ *action = HCLGE_FD_ACTION_SELECT_QUEUE;
+ *queue_id = ring;
+ }
+
+ return 0;
+}
+
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_user_def_info info;
+ u16 dst_vport_id = 0, q_index = 0;
+ struct ethtool_rx_flow_spec *fs;
+ struct hclge_fd_rule *rule;
+ u32 unused = 0;
+ u8 action;
+ int ret;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "flow table director is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hdev->fd_en) {
+ dev_err(&hdev->pdev->dev,
+ "please enable flow director first\n");
+ return -EOPNOTSUPP;
}
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
+ if (ret)
+ return ret;
+
+ ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
+ &action, &q_index);
+ if (ret)
+ return ret;
+
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
- ret = hclge_fd_get_tuple(hdev, fs, rule);
+ ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
if (ret) {
kfree(rule);
return ret;
}
rule->flow_type = fs->flow_type;
-
rule->location = fs->location;
rule->unused_tuple = unused;
rule->vf_id = dst_vport_id;
@@ -5734,15 +6577,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
rule->action = action;
rule->rule_type = HCLGE_FD_EP_ACTIVE;
- /* to avoid rule conflict, when user configure rule by ethtool,
- * we need to clear all arfs rules
- */
- hclge_clear_arfs_rules(handle);
-
- spin_lock_bh(&hdev->fd_rule_lock);
- ret = hclge_fd_config_rule(hdev, rule);
-
- spin_unlock_bh(&hdev->fd_rule_lock);
+ ret = hclge_add_fd_entry_common(hdev, rule);
+ if (ret)
+ kfree(rule);
return ret;
}
@@ -5755,7 +6592,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -5763,38 +6600,36 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
return -EINVAL;
- if (!hclge_fd_rule_exist(hdev, fs->location)) {
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
+ !test_bit(fs->location, hdev->fd_bmap)) {
dev_err(&hdev->pdev->dev,
"Delete fail, rule %u is inexistent\n", fs->location);
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -ENOENT;
}
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
NULL, false);
if (ret)
- return ret;
+ goto out;
- spin_lock_bh(&hdev->fd_rule_lock);
- ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
+ hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
+out:
spin_unlock_bh(&hdev->fd_rule_lock);
-
return ret;
}
-static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
- bool clear_list)
+static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
+ bool clear_list)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
+
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5815,19 +6650,27 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
spin_unlock_bh(&hdev->fd_rule_lock);
}
+static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
+{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
+ hclge_clear_fd_rules_in_list(hdev, true);
+ hclge_fd_disable_user_def(hdev);
+}
+
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node;
- int ret;
/* Return ok here, because reset error handling will check this
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -5836,25 +6679,11 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
- if (!ret)
- ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
-
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Restore rule %u failed, remove it\n",
- rule->location);
- clear_bit(rule->location, hdev->fd_bmap);
- hlist_del(&rule->rule_node);
- kfree(rule);
- hdev->hclge_fd_rule_num--;
- }
+ if (rule->state == HCLGE_FD_ACTIVE)
+ rule->state = HCLGE_FD_TO_ADD;
}
-
- if (hdev->hclge_fd_rule_num)
- hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
-
spin_unlock_bh(&hdev->fd_rule_lock);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
return 0;
}
@@ -5865,7 +6694,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -5874,184 +6703,164 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
return 0;
}
-static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd)
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip4_spec *spec,
+ struct ethtool_tcpip4_spec *spec_mask)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_fd_rule *rule = NULL;
- struct hclge_dev *hdev = vport->back;
- struct ethtool_rx_flow_spec *fs;
- struct hlist_node *node2;
-
- if (!hnae3_dev_fd_supported(hdev))
- return -EOPNOTSUPP;
-
- fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
- spin_lock_bh(&hdev->fd_rule_lock);
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= fs->location)
- break;
- }
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
- if (!rule || fs->location != rule->location) {
- spin_unlock_bh(&hdev->fd_rule_lock);
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
- return -ENOENT;
- }
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+}
- fs->flow_type = rule->flow_type;
- switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
- case SCTP_V4_FLOW:
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- fs->h_u.tcp_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip4_spec *spec,
+ struct ethtool_usrip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
- fs->h_u.tcp_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
- fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip4_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
- fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip4_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+ spec->proto = rule->tuples.ip_proto;
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
- fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.tcp_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
+ spec->ip_ver = ETH_RX_NFC_IP4;
+}
- break;
- case IP_USER_FLOW:
- fs->h_u.usr_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip6_spec *spec,
+ struct ethtool_tcpip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src,
+ rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst,
+ rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+ IPV6_SIZE);
- fs->h_u.usr_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.usr_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+ IPV6_SIZE);
- fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.usr_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
+ spec->tclass = rule->tuples.ip_tos;
+ spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
- fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip4_spec.proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
- fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
- break;
- case SCTP_V6_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip6_spec *spec,
+ struct ethtool_usrip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src,
+ rule->tuples_mask.src_ip, IPV6_SIZE);
- fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip6_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
- fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip6_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+ spec->tclass = rule->tuples.ip_tos;
+ spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
- break;
- case IPV6_USER_FLOW:
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.usr_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
+ spec->l4_proto = rule->tuples.ip_proto;
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+}
- fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip6_spec.l4_proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+ struct ethhdr *spec,
+ struct ethhdr *spec_mask)
+{
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
- break;
- case ETHER_FLOW:
- ether_addr_copy(fs->h_u.ether_spec.h_source,
- rule->tuples.src_mac);
- if (rule->unused_tuple & BIT(INNER_SRC_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_source);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_source,
- rule->tuples_mask.src_mac);
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(spec_mask->h_source);
+ else
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
- ether_addr_copy(fs->h_u.ether_spec.h_dest,
- rule->tuples.dst_mac);
- if (rule->unused_tuple & BIT(INNER_DST_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_dest);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_dest,
- rule->tuples_mask.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(spec_mask->h_dest);
+ else
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
- fs->h_u.ether_spec.h_proto =
- cpu_to_be16(rule->tuples.ether_proto);
- fs->m_u.ether_spec.h_proto =
- rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
- 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
- break;
- default:
- spin_unlock_bh(&hdev->fd_rule_lock);
- return -EOPNOTSUPP;
+static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
+ HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
+ fs->h_ext.data[0] = 0;
+ fs->h_ext.data[1] = 0;
+ fs->m_ext.data[0] = 0;
+ fs->m_ext.data[1] = 0;
+ } else {
+ fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
+ fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
+ fs->m_ext.data[0] =
+ cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
+ fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
}
+}
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
if (fs->flow_type & FLOW_EXT) {
fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
fs->m_ext.vlan_tci =
rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
- cpu_to_be16(VLAN_VID_MASK) :
- cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
+
+ hclge_fd_get_user_def_info(fs, rule);
}
if (fs->flow_type & FLOW_MAC_EXT) {
@@ -6062,7 +6871,27 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
ether_addr_copy(fs->m_u.ether_spec.h_dest,
rule->tuples_mask.dst_mac);
}
+}
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+ u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
} else {
@@ -6073,6 +6902,64 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
fs->ring_cookie |= vf_id;
}
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule *rule = NULL;
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ rule = hclge_get_fd_rule(hdev, fs->location);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -ENOENT;
+ }
+
+ fs->flow_type = rule->flow_type;
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec);
+ break;
+ case IP_USER_FLOW:
+ hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec);
+ break;
+ case IPV6_USER_FLOW:
+ hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec);
+ break;
+ /* The flow type of the fd rule has been checked before it is added to
+ * the rule list. As all other flow types have been handled, it must be
+ * ETHER_FLOW for the default case
+ */
+ default:
+ hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+ &fs->m_u.ether_spec);
+ break;
+ }
+
+ hclge_fd_get_ext_info(fs, rule);
+
+ hclge_fd_get_ring_cookie(fs, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -6088,7 +6975,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hlist_node *node2;
int cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -6101,6 +6988,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return -EMSGSIZE;
}
+ if (rule->state == HCLGE_FD_TO_DEL)
+ continue;
+
rule_locs[cnt] = rule->location;
cnt++;
}
@@ -6160,6 +7050,7 @@ static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
rule->action = 0;
rule->vf_id = 0;
rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+ rule->state = HCLGE_FD_TO_ADD;
if (tuples->ether_proto == ETH_P_IP) {
if (tuples->ip_proto == IPPROTO_TCP)
rule->flow_type = TCP_V4_FLOW;
@@ -6179,30 +7070,26 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
- u16 tmp_queue_id;
u16 bit_id;
- int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
- memset(&new_tuples, 0, sizeof(new_tuples));
- hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
- spin_lock_bh(&hdev->fd_rule_lock);
-
/* when there is already fd rule existed add by user,
* arfs should not work
*/
- if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
+ hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -EOPNOTSUPP;
}
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
/* check is there flow director filter existed for this flow,
* if not, create a new filter for it;
* if filter exist with different queue id, modify the filter;
@@ -6213,45 +7100,28 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOSPC;
}
rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOMEM;
}
- set_bit(bit_id, hdev->fd_bmap);
rule->location = bit_id;
- rule->flow_id = flow_id;
+ rule->arfs.flow_id = flow_id;
rule->queue_id = queue_id;
hclge_fd_build_arfs_rule(&new_tuples, rule);
- ret = hclge_fd_config_rule(hdev, rule);
-
- spin_unlock_bh(&hdev->fd_rule_lock);
-
- if (ret)
- return ret;
-
- return rule->location;
+ hclge_update_fd_list(hdev, rule->state, rule->location, rule);
+ hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
+ } else if (rule->queue_id != queue_id) {
+ rule->queue_id = queue_id;
+ rule->state = HCLGE_FD_TO_ADD;
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
}
-
spin_unlock_bh(&hdev->fd_rule_lock);
-
- if (rule->queue_id == queue_id)
- return rule->location;
-
- tmp_queue_id = rule->queue_id;
- rule->queue_id = queue_id;
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret) {
- rule->queue_id = tmp_queue_id;
- return ret;
- }
-
return rule->location;
}
@@ -6261,7 +7131,6 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_fd_rule *rule;
struct hlist_node *node;
- HLIST_HEAD(del_list);
spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
@@ -6269,33 +7138,368 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
return;
}
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rule->state != HCLGE_FD_ACTIVE)
+ continue;
if (rps_may_expire_flow(handle->netdev, rule->queue_id,
- rule->flow_id, rule->location)) {
- hlist_del_init(&rule->rule_node);
- hlist_add_head(&rule->rule_node, &del_list);
- hdev->hclge_fd_rule_num--;
- clear_bit(rule->location, hdev->fd_bmap);
+ rule->arfs.flow_id, rule->location)) {
+ rule->state = HCLGE_FD_TO_DEL;
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
}
}
spin_unlock_bh(&hdev->fd_rule_lock);
+#endif
+}
- hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
- kfree(rule);
+/* make sure being called after lock up with fd_rule_lock */
+static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret;
+
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
+ return 0;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ switch (rule->state) {
+ case HCLGE_FD_TO_DEL:
+ case HCLGE_FD_ACTIVE:
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ if (ret)
+ return ret;
+ fallthrough;
+ case HCLGE_FD_TO_ADD:
+ hclge_fd_dec_rule_cnt(hdev, rule->location);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ break;
+ default:
+ break;
+ }
}
+ hclge_sync_fd_state(hdev);
+
#endif
+ return 0;
}
-static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+static void hclge_get_cls_key_basic(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+ u16 ethtype_key, ethtype_mask;
+
+ flow_rule_match_basic(flow, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
+
+ if (ethtype_key == ETH_P_ALL) {
+ ethtype_key = 0;
+ ethtype_mask = 0;
+ }
+ rule->tuples.ether_proto = ethtype_key;
+ rule->tuples_mask.ether_proto = ethtype_mask;
+ rule->tuples.ip_proto = match.key->ip_proto;
+ rule->tuples_mask.ip_proto = match.mask->ip_proto;
+ } else {
+ rule->unused_tuple |= BIT(INNER_IP_PROTO);
+ rule->unused_tuple |= BIT(INNER_ETH_TYPE);
+ }
+}
+
+static void hclge_get_cls_key_mac(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(flow, &match);
+ ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
+ ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
+ ether_addr_copy(rule->tuples.src_mac, match.key->src);
+ ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
+ } else {
+ rule->unused_tuple |= BIT(INNER_DST_MAC);
+ rule->unused_tuple |= BIT(INNER_SRC_MAC);
+ }
+}
+
+static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(flow, &match);
+ rule->tuples.vlan_tag1 = match.key->vlan_id |
+ (match.key->vlan_priority << VLAN_PRIO_SHIFT);
+ rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
+ (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
+ } else {
+ rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+ }
+}
+
+static void hclge_get_cls_key_ip(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ u16 addr_type = 0;
+
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(flow, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(flow, &match);
+ rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(match.mask->src);
+ rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(match.mask->dst);
+ } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(flow, &match);
+ be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32, IPV6_SIZE);
+ } else {
+ rule->unused_tuple |= BIT(INNER_SRC_IP);
+ rule->unused_tuple |= BIT(INNER_DST_IP);
+ }
+}
+
+static void hclge_get_cls_key_port(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(flow, &match);
+
+ rule->tuples.src_port = be16_to_cpu(match.key->src);
+ rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
+ rule->tuples.dst_port = be16_to_cpu(match.key->dst);
+ rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
+ } else {
+ rule->unused_tuple |= BIT(INNER_SRC_PORT);
+ rule->unused_tuple |= BIT(INNER_DST_PORT);
+ }
+}
+
+static int hclge_parse_cls_flower(struct hclge_dev *hdev,
+ struct flow_cls_offload *cls_flower,
+ struct hclge_fd_rule *rule)
+{
+ struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_dissector *dissector = flow->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ hclge_get_cls_key_basic(flow, rule);
+ hclge_get_cls_key_mac(flow, rule);
+ hclge_get_cls_key_vlan(flow, rule);
+ hclge_get_cls_key_ip(flow, rule);
+ hclge_get_cls_key_port(flow, rule);
+
+ return 0;
+}
+
+static int hclge_check_cls_flower(struct hclge_dev *hdev,
+ struct flow_cls_offload *cls_flower, int tc)
+{
+ u32 prio = cls_flower->common.prio;
+
+ if (tc < 0 || tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev, "invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ if (prio == 0 ||
+ prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "prio %u should be in range[1, %u]\n",
+ prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+ return -EINVAL;
+ }
+
+ if (test_bit(prio - 1, hdev->fd_bmap)) {
+ dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hclge_add_cls_flower(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower,
+ int tc)
{
-#ifdef CONFIG_RFS_ACCEL
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ int ret;
- if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
- hclge_del_all_fd_entries(handle, true);
-#endif
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = hclge_check_cls_flower(hdev, cls_flower, tc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check cls flower params, ret = %d\n", ret);
+ return ret;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->action = HCLGE_FD_ACTION_SELECT_TC;
+ rule->cls_flower.tc = tc;
+ rule->location = cls_flower->common.prio - 1;
+ rule->vf_id = 0;
+ rule->cls_flower.cookie = cls_flower->cookie;
+ rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
+
+ ret = hclge_add_fd_entry_common(hdev, rule);
+ if (ret)
+ kfree(rule);
+
+ return ret;
+}
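/* A sketch of the prio-to-location mapping used above; the tc command
 * lines are illustrative assumptions, not taken from this patch:
 *
 *	tc filter add ... flower ... prio 1  ->  rule->location = 0
 *	tc filter add ... flower ... prio N  ->  rule->location = N - 1
 *
 * prio must be in [1, rule_num[HCLGE_FD_STAGE_1]], and a prio whose bit
 * is already set in fd_bmap is rejected by hclge_check_cls_flower().
 */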
+
+static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
+ unsigned long cookie)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rule->cls_flower.cookie == cookie)
+ return rule;
+ }
+
+ return NULL;
+}
+
+static int hclge_del_cls_flower(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ int ret;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
+ NULL, false);
+ if (ret) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return ret;
+ }
+
+ hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return 0;
+}
+
+static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret = 0;
+
+ if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
+ return;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
+ switch (rule->state) {
+ case HCLGE_FD_TO_ADD:
+ ret = hclge_fd_config_rule(hdev, rule);
+ if (ret)
+ goto out;
+ rule->state = HCLGE_FD_ACTIVE;
+ break;
+ case HCLGE_FD_TO_DEL:
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ if (ret)
+ goto out;
+ hclge_fd_dec_rule_cnt(hdev, rule->location);
+ hclge_fd_free_node(hdev, rule);
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ if (ret)
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static void hclge_sync_fd_table(struct hclge_dev *hdev)
+{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
+ if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
+ bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+
+ hclge_clear_fd_rules_in_list(hdev, clear_list);
+ }
+
+ hclge_sync_fd_user_def_cfg(hdev, false);
+
+ hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
@@ -6307,6 +7511,14 @@ static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+}
+
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6327,14 +7539,15 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- bool clear;
hdev->fd_en = enable;
- clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+
if (!enable)
- hclge_del_all_fd_entries(handle, clear);
+ set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
else
hclge_restore_fd_entries(handle);
+
+ hclge_task_schedule(hdev, 0);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6393,7 +7606,7 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
@@ -6432,14 +7645,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM 100
+ int link_status;
int i = 0;
int ret;
do {
- ret = hclge_get_mac_link_status(hdev);
- if (ret < 0)
+ ret = hclge_get_mac_link_status(hdev, &link_status);
+ if (ret)
return ret;
- else if (ret == link_ret)
+ if (link_status == link_ret)
return 0;
msleep(HCLGE_LINK_STATUS_MS);
@@ -6450,9 +7664,6 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
bool is_phy)
{
-#define HCLGE_LINK_STATUS_DOWN 0
-#define HCLGE_LINK_STATUS_UP 1
-
int link_ret;
link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
@@ -6483,15 +7694,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
/* 3 Config mac work mode with loopback flag
* and its original configure parameters
*/
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -6499,19 +7708,16 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
- enum hnae3_loop loop_mode)
+static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
-#define HCLGE_SERDES_RETRY_MS 10
-#define HCLGE_SERDES_RETRY_NUM 100
-
- struct hclge_serdes_lb_cmd *req;
+ struct hclge_common_lb_cmd *req;
struct hclge_desc desc;
- int ret, i = 0;
u8 loop_mode_b;
+ int ret;
- req = (struct hclge_serdes_lb_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+ req = (struct hclge_common_lb_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
switch (loop_mode) {
case HNAE3_LOOP_SERIAL_SERDES:
@@ -6520,55 +7726,83 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
case HNAE3_LOOP_PARALLEL_SERDES:
loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
break;
+ case HNAE3_LOOP_PHY:
+ loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
+ break;
default:
dev_err(&hdev->pdev->dev,
- "unsupported serdes loopback mode %d\n", loop_mode);
+ "unsupported loopback mode %d\n", loop_mode);
return -ENOTSUPP;
}
- if (en) {
+ req->mask = loop_mode_b;
+ if (en)
req->enable = loop_mode_b;
- req->mask = loop_mode_b;
- } else {
- req->mask = loop_mode_b;
- }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "serdes loopback set fail, ret = %d\n", ret);
- return ret;
- }
+ "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
+ loop_mode, ret);
+
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
+
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u32 i = 0;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
do {
- msleep(HCLGE_SERDES_RETRY_MS);
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+ msleep(HCLGE_COMMON_LB_RETRY_MS);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "serdes loopback get, ret = %d\n", ret);
+ "failed to get loopback done status, ret = %d\n",
+ ret);
return ret;
}
- } while (++i < HCLGE_SERDES_RETRY_NUM &&
- !(req->result & HCLGE_CMD_SERDES_DONE_B));
+ } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
+ !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
- if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
- dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+ if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
+ dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
return -EBUSY;
- } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
- dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+ } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
+ dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
return -EIO;
}
- return ret;
+
+ return 0;
+}
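/* Timing sketch, derived from the constants above: the wait loop sleeps
 * HCLGE_COMMON_LB_RETRY_MS (10 ms) before each of at most
 * HCLGE_COMMON_LB_RETRY_NUM (100) polls, so the firmware has roughly
 * 100 * 10 ms = 1 s to report completion before -EBUSY is returned.
 */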
+
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback_wait(hdev);
}
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
enum hnae3_loop loop_mode)
{
int ret;
- ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
+ ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
if (ret)
return ret;
@@ -6617,8 +7851,12 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
struct phy_device *phydev = hdev->hw.mac.phydev;
int ret;
- if (!phydev)
+ if (!phydev) {
+ if (hnae3_dev_phy_imp_supported(hdev))
+ return hclge_set_common_loopback(hdev, en,
+ HNAE3_LOOP_PHY);
return -ENOTSUPP;
+ }
if (en)
ret = hclge_enable_phy_loopback(hdev, phydev);
@@ -6640,41 +7878,50 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
- int stream_id, bool enable)
+static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
+ u16 stream_id, bool enable)
{
struct hclge_desc desc;
struct hclge_cfg_com_tqp_queue_cmd *req =
(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
- int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
- req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
+ req->tqp_id = cpu_to_le16(tqp_id);
req->stream_id = cpu_to_le16(stream_id);
if (enable)
req->enable |= 1U << HCLGE_TQP_ENABLE_B;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Tqp enable fail, status =%d.\n", ret);
- return ret;
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
- int i, ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
* the same, the packets are looped back in the SSU. If SSU loopback
* is disabled, packets can reach MAC even if SMAC is the same as DMAC.
*/
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
@@ -6689,11 +7936,13 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
break;
case HNAE3_LOOP_SERIAL_SERDES:
case HNAE3_LOOP_PARALLEL_SERDES:
- ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
+ ret = hclge_set_common_loopback(hdev, en, loop_mode);
break;
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -6704,14 +7953,12 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
if (ret)
return ret;
- kinfo = &vport->nic.kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
- ret = hclge_tqp_enable(hdev, i, 0, en);
- if (ret)
- return ret;
- }
+ ret = hclge_tqp_enable(handle, en);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
+ en ? "enable" : "disable", ret);
- return 0;
+ return ret;
}
static int hclge_set_default_loopback(struct hclge_dev *hdev)
@@ -6722,30 +7969,14 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
+ ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
if (ret)
return ret;
- return hclge_cfg_serdes_loopback(hdev, false,
+ return hclge_cfg_common_loopback(hdev, false,
HNAE3_LOOP_PARALLEL_SERDES);
}
-static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo;
- struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
- int i;
-
- kinfo = &vport->nic.kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT 100000
@@ -6765,7 +7996,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
struct hclge_dev *hdev = vport->back;
if (enable) {
- hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ hclge_task_schedule(hdev, 0);
} else {
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -6787,7 +8018,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hdev->hw.mac.link = 0;
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_mac_start_phy(hdev);
@@ -6798,24 +8029,24 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_clear_arfs_rules(hdev);
+ spin_unlock_bh(&hdev->fd_rule_lock);
- hclge_clear_arfs_rules(handle);
-
- /* If it is not PF reset, the firmware will disable the MAC,
+ /* If it is not PF reset or FLR, the firmware will disable the MAC,
 * so it only needs to stop the phy here.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET) {
+ hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;
}
- for (i = 0; i < handle->kinfo.num_tqps; i++)
- hclge_reset_tqp(handle, i);
+ hclge_reset_tqp(handle);
hclge_config_mac_tnl_int(hdev, false);
@@ -6825,14 +8056,29 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_mac_stop_phy(hdev);
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_update_link_status(hdev);
}
int hclge_vport_start(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_active_jiffies = jiffies;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+ if (vport->vport_id) {
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ } else {
+ hclge_restore_hw_table(hdev);
+ }
+ }
+
+ clear_bit(vport->vport_id, hdev->vport_config_block);
+
return 0;
}
@@ -6869,17 +8115,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
}
if (op == HCLGE_MAC_VLAN_ADD) {
- if ((!resp_code) || (resp_code == 1)) {
+ if (!resp_code || resp_code == 1)
return 0;
- } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for uc_overflow.\n");
- return -ENOSPC;
- } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for mc_overflow.\n");
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
return -ENOSPC;
- }
dev_err(&hdev->pdev->dev,
"add mac addr failed for undefined, code=%u.\n",
@@ -7019,14 +8259,14 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
if (is_mc) {
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(desc[0].data,
req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_MAC_VLAN_ADD,
true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2],
HCLGE_OPC_MAC_VLAN_ADD,
true);
@@ -7076,12 +8316,12 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
resp_code,
HCLGE_MAC_VLAN_ADD);
} else {
- hclge_cmd_reuse_desc(&mc_desc[0], false);
- mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[1], false);
- mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[2], false);
- mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
+ mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
+ mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
+ mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(mc_desc[0].data, req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
@@ -7103,52 +8343,8 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
-static int hclge_init_umv_space(struct hclge_dev *hdev)
-{
- u16 allocated_size = 0;
- int ret;
-
- ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
- true);
- if (ret)
- return ret;
-
- if (allocated_size < hdev->wanted_umv_size)
- dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %u, get %u\n",
- hdev->wanted_umv_size, allocated_size);
-
- mutex_init(&hdev->umv_mutex);
- hdev->max_umv_size = allocated_size;
- /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
- * preserve some unicast mac vlan table entries shared by pf
- * and its vfs.
- */
- hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
- hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
-
- return 0;
-}
-
-static int hclge_uninit_umv_space(struct hclge_dev *hdev)
-{
- int ret;
-
- if (hdev->max_umv_size > 0) {
- ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
- false);
- if (ret)
- return ret;
- hdev->max_umv_size = 0;
- }
- mutex_destroy(&hdev->umv_mutex);
-
- return 0;
-}
-
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc)
+ u16 *allocated_size)
{
struct hclge_umv_spc_alc_cmd *req;
struct hclge_desc desc;
@@ -7156,21 +8352,42 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
req = (struct hclge_umv_spc_alc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
- if (!is_alloc)
- hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
req->space_size = cpu_to_le32(space_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "%s umv space failed for cmd_send, ret =%d\n",
- is_alloc ? "allocate" : "free", ret);
+ dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+ ret);
return ret;
}
- if (is_alloc && allocated_size)
- *allocated_size = le32_to_cpu(desc.data[1]);
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "failed to alloc umv space, want %u, get %u\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+
+ if (hdev->ae_dev->dev_specs.mc_mac_size)
+ set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
return 0;
}
@@ -7185,21 +8402,27 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
vport->used_umv_num = 0;
}
- mutex_lock(&hdev->umv_mutex);
+ mutex_lock(&hdev->vport_lock);
hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
- mutex_unlock(&hdev->umv_mutex);
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ mutex_unlock(&hdev->vport_lock);
+
+ hdev->used_mc_mac_num = 0;
}
-static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
struct hclge_dev *hdev = vport->back;
bool is_full;
- mutex_lock(&hdev->umv_mutex);
+ if (need_lock)
+ mutex_lock(&hdev->vport_lock);
+
is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
hdev->share_umv_size == 0);
- mutex_unlock(&hdev->umv_mutex);
+
+ if (need_lock)
+ mutex_unlock(&hdev->vport_lock);
return is_full;
}
@@ -7208,7 +8431,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
struct hclge_dev *hdev = vport->back;
- mutex_lock(&hdev->umv_mutex);
if (is_free) {
if (vport->used_umv_num > hdev->priv_umv_size)
hdev->share_umv_size++;
@@ -7221,7 +8443,101 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
hdev->share_umv_size--;
vport->used_umv_num++;
}
- mutex_unlock(&hdev->umv_mutex);
+}
+
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+ const u8 *mac_addr)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGE_MAC_TO_ADD:
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGE_MAC_TO_DEL:
+ if (mac_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list, the mac_node->state won't be
+ * ACTIVE.
+ */
+ case HCLGE_MAC_ACTIVE:
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+
+ break;
+ }
+}
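/* A sketch of the state transitions encoded by the switch above
 * (rows: current node state, columns: requested state):
 *
 *	current \ request   TO_ADD    TO_DEL        ACTIVE
 *	TO_ADD              TO_ADD    node freed    ACTIVE
 *	TO_DEL              ACTIVE    TO_DEL        TO_DEL
 *	ACTIVE              ACTIVE    TO_DEL        (not expected)
 */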
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ /* if the mac addr is already in the mac list, there is no need to
+ * add a new entry for it; just check the mac addr state and convert
+ * it to a new state, remove it, or do nothing.
+ */
+ mac_node = hclge_find_mac_node(list, addr);
+ if (mac_node) {
+ hclge_update_mac_node(mac_node, state);
+ spin_unlock_bh(&vport->mac_list_lock);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+ return 0;
+ }
+
+ /* if this address was never added, there is no need to delete it */
+ if (state == HCLGE_MAC_TO_DEL) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_err(&hdev->pdev->dev,
+ "failed to delete address %s from mac list\n",
+ format_mac_addr);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
@@ -7229,12 +8545,14 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc;
@@ -7245,9 +8563,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr, is_zero_ether_addr(addr),
+ "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
+ format_mac_addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -7268,29 +8587,26 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
if (ret == -ENOENT) {
- if (!hclge_is_umv_space_full(vport)) {
+ mutex_lock(&hdev->vport_lock);
+ if (!hclge_is_umv_space_full(vport, false)) {
ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
if (!ret)
hclge_update_umv_space(vport, false);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
+ mutex_unlock(&hdev->vport_lock);
- dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
- hdev->priv_umv_size);
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
return -ENOSPC;
}
/* check if we just hit the duplicate */
- if (!ret) {
- dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
- vport->vport_id, addr);
- return 0;
- }
-
- dev_err(&hdev->pdev->dev,
- "PF failed to add unicast entry(%pM) in the MAC table\n",
- addr);
+ if (!ret)
+ return -EEXIST;
return ret;
}
@@ -7300,12 +8616,14 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
int ret;
@@ -7314,8 +8632,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
- addr);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -7323,8 +8642,12 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret)
+ if (!ret || ret == -ENOENT) {
+ mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
return ret;
}
@@ -7334,29 +8657,39 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
+ bool is_new_addr = false;
int status;
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Add mc mac err! invalid mac:%pM.\n",
- addr);
+ "Add mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
+ if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+ hdev->used_mc_mac_num >=
+ hdev->ae_dev->dev_specs.mc_mac_size)
+ goto err_no_space;
+
+ is_new_addr = true;
+
 /* This mac addr does not exist, add a new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
@@ -7366,11 +8699,21 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-
if (status == -ENOSPC)
- dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ goto err_no_space;
+ else if (!status && is_new_addr)
+ hdev->used_mc_mac_num++;
return status;
+
+err_no_space:
+ /* if the table has already overflowed, do not print each time */
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ }
+
+ return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
@@ -7378,27 +8721,29 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc[3];
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_dbg(&hdev->pdev->dev,
- "Remove mc mac err! invalid mac:%pM.\n",
- addr);
+ "Remove mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -7407,118 +8752,384 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
- if (hclge_is_all_function_id_zero(desc))
+ if (hclge_is_all_function_id_zero(desc)) {
/* All the vfid is zero, so need to delete this entry */
status = hclge_remove_mac_vlan_tbl(vport, &req);
- else
+ if (!status)
+ hdev->used_mc_mac_num--;
+ } else {
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-
- } else {
- /* Maybe this mac address is in mta table, but it cannot be
- * deleted here because an entry of mta represents an address
- * range rather than a specific address. the delete action to
- * all entries will take effect in update_mta_status called by
- * hns3_nic_set_rx_mode.
- */
+ }
+ } else if (status == -ENOENT) {
status = 0;
}
return status;
}
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg;
- struct list_head *list;
+ int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
- if (!vport->vport_id)
- return;
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ sync = hclge_add_uc_addr_common;
+ else
+ sync = hclge_add_mc_addr_common;
- mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
- if (!mac_cfg)
- return;
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = sync(vport, mac_node->mac_addr);
+ if (!ret) {
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+
+ /* If one unicast mac address already exists in hardware,
+ * we still need to try whether the other unicast mac
+ * addresses are new ones that can be added.
+ * Multicast mac addresses can be reused, so even when
+ * there is no space to add a new multicast mac address,
+ * we should still check whether the other mac addresses
+ * already exist in hardware and can be reused.
+ */
+ if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
+ (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
+ break;
+ }
+ }
+}
- mac_cfg->hd_tbl_status = true;
- memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ unsync = hclge_rm_uc_addr_common;
+ else
+ unsync = hclge_rm_mc_addr_common;
- list_add_tail(&mac_cfg->node, list);
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = unsync(vport, mac_node->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
}
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ bool all_added = true;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ all_added = false;
+
+ /* if the mac address from tmp_add_list is not in the
+ * uc/mc_mac_list, a TO_DEL request was received for it during
+ * the time window of adding the mac address into the mac
+ * table. If the mac_node state is ACTIVE, change it to TO_DEL
+ * so that it is removed next time; otherwise it must be
+ * TO_ADD, meaning this address has not been added into the
+ * mac table yet, so just remove the mac node.
+ */
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclge_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ list_move_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+
+ return all_added;
+}
+
+static void hclge_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr exists in the mac list, a new TO_ADD
+ * request was received during the time window of
+ * configuring the mac address. Since the mac node state
+ * is TO_ADD and the address is already in the hardware
+ * (because the delete failed), we just need to change
+ * the mac node state to ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_move_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ bool is_all_added)
+{
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ }
+}
+
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
struct list_head *list;
- bool uc_flag, mc_flag;
+ bool all_added;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+ /* move the mac addrs to the tmp_add_list and tmp_del_list, so
+ * we can add/delete these mac addrs outside the spin lock
+ */
list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ &vport->uc_mac_list : &vport->mc_mac_list;
- uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
- mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
+ spin_lock_bh(&vport->mac_list_lock);
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
- if (uc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_addr);
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ list_move_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
+
+ /* if adding/deleting some mac addresses failed, move them back to
+ * the mac_list and retry next time.
+ */
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_update_overflow_flags(vport, mac_type, all_added);
+}
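/* A compact sketch of the sync flow implemented above (derived from the
 * code; the names are the real ones used in this patch):
 *
 *	lock mac_list_lock
 *	  TO_DEL nodes  -> tmp_del_list (moved)
 *	  TO_ADD nodes  -> tmp_add_list (copied)
 *	unlock
 *	unsync tmp_del_list   (delete from hardware first, to free space)
 *	sync   tmp_add_list   (add to hardware)
 *	lock mac_list_lock
 *	  merge both temporary lists back into the vport list
 *	unlock
 *	update the UPE/MPE overflow flags
 */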
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block))
+ return false;
+
+ if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+ return true;
+
+ return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (!hclge_need_sync_mac_table(vport))
+ continue;
- if (mc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_addr);
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+ }
+}
- list_del(&mac_cfg->node);
- kfree(mac_cfg);
+static void hclge_build_del_list(struct list_head *list,
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ switch (mac_cfg->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_move_tail(&mac_cfg->node, tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ if (is_del_list) {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
break;
}
}
}
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+ int (*unsync)(struct hclge_vport *vport,
+ const unsigned char *addr),
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
+ ret = unsync(vport, mac_cfg->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ /* clear all mac addr from hardware, but keep these
+ * mac addr in the mac list, and restore them after
+ * the vf reset is finished.
+ */
+ if (!is_del_list &&
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
+ } else {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ } else if (is_del_list) {
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+}
+
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
- struct list_head *list;
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
+ INIT_LIST_HEAD(&tmp_del_list);
- if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
- mac_cfg->hd_tbl_status = false;
- if (is_del_list) {
- list_del(&mac_cfg->node);
- kfree(mac_cfg);
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_move_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ break;
}
}
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
+
+ if (!list_empty(&tmp_del_list))
+ dev_warn(&hdev->pdev->dev,
+ "uninit %s mac list for vport %u not completely.\n",
+ mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+ vport->vport_id);
+
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
}
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
- struct hclge_vport_mac_addr_cfg *mac, *tmp;
struct hclge_vport *vport;
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
-
- list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
}
}
@@ -7564,65 +9175,42 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
return return_status;
}
-static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
- u8 *mac_addr)
-{
- struct hclge_mac_vlan_tbl_entry_cmd req;
- struct hclge_dev *hdev = vport->back;
- struct hclge_desc desc;
- u16 egress_port = 0;
- int i;
-
- if (is_zero_ether_addr(mac_addr))
- return false;
-
- memset(&req, 0, sizeof(req));
- hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
- HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- req.egress_port = cpu_to_le16(egress_port);
- hclge_prepare_mac_addr(&req, mac_addr, false);
-
- if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
- return true;
-
- vf_idx += HCLGE_VF_VPORT_START_NUM;
- for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
- if (i != vf_idx &&
- ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
- return true;
-
- return false;
-}
-
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
u8 *mac_addr)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
dev_info(&hdev->pdev->dev,
- "Specified MAC(=%pM) is same as before, no change committed!\n",
- mac_addr);
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
return 0;
}
- if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
- dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
- mac_addr);
- return -EEXIST;
- }
-
ether_addr_copy(vport->vf_info.mac, mac_addr);
- dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
- return hclge_inform_reset_assert_to_vf(vport);
+ /* there is a time window before the PF knows the VF is
+ * not alive, which may cause the mailbox send to fail; that
+ * does not matter, the VF will query the MAC when it
+ * reinitializes.
+ */
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
+ vf, format_mac_addr);
+ (void)hclge_inform_reset_assert_to_vf(vport);
+ return 0;
+ }
+
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+ vf, format_mac_addr);
+ return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
@@ -7676,68 +9264,171 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
-static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr)
+{
+ struct list_head *list = &vport->uc_mac_list;
+ struct hclge_mac_node *old_node, *new_node;
+
+ new_node = hclge_find_mac_node(list, new_addr);
+ if (!new_node) {
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->state = HCLGE_MAC_TO_ADD;
+ ether_addr_copy(new_node->mac_addr, new_addr);
+ list_add(&new_node->node, list);
+ } else {
+ if (new_node->state == HCLGE_MAC_TO_DEL)
+ new_node->state = HCLGE_MAC_ACTIVE;
+
+ /* make sure the new addr is at the list head, so that the dev
+ * addr is not left out of the mac table due to the umv space
+ * limitation after a global/imp reset, which clears the mac
+ * table in hardware.
+ */
+ list_move(&new_node->node, list);
+ }
+
+ if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+ old_node = hclge_find_mac_node(list, old_addr);
+ if (old_node) {
+ if (old_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&old_node->node);
+ kfree(old_node);
+ } else {
+ old_node->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ return 0;
+}
+
+static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
+ unsigned char *old_addr = NULL;
int ret;
/* mac addr check */
if (is_zero_ether_addr(new_addr) ||
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%pM.\n",
- new_addr);
+ "change uc mac err! invalid mac: %s.\n",
+ format_mac_addr);
return -EINVAL;
}
- if ((!is_first || is_kdump_kernel()) &&
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_warn(&hdev->pdev->dev,
- "remove old uc mac address fail.\n");
-
- ret = hclge_add_uc_addr(handle, new_addr);
+ ret = hclge_pause_addr_cfg(hdev, new_addr);
if (ret) {
dev_err(&hdev->pdev->dev,
- "add uc mac address fail, ret =%d.\n",
+ "failed to configure mac pause address, ret = %d\n",
ret);
-
- if (!is_first &&
- hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_err(&hdev->pdev->dev,
- "restore uc mac address fail.\n");
-
- return -EIO;
+ return ret;
}
- ret = hclge_pause_addr_cfg(hdev, new_addr);
+ if (!is_first)
+ old_addr = hdev->hw.mac.mac_addr;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "configure mac pause address fail, ret =%d.\n",
- ret);
- return -EIO;
- }
+ "failed to change the mac addr:%s, ret = %d\n",
+ format_mac_addr, ret);
+ spin_unlock_bh(&vport->mac_list_lock);
+ if (!is_first)
+ hclge_pause_addr_cfg(hdev, old_addr);
+
+ return ret;
+ }
+ /* we must update the dev addr under the spin lock, to prevent the dev
+ * addr from being removed by the set_rx_mode path.
+ */
ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_task_schedule(hdev, 0);
return 0;
}
+static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hdev->hw.mac.phy_addr;
+ /* this command reads phy id and register at the same time */
+ fallthrough;
+ case SIOCGMIIREG:
+ data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
+ return 0;
+
+ case SIOCSMIIREG:
+ return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
int cmd)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hdev->hw.mac.phydev)
- return -EOPNOTSUPP;
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return hclge_ptp_get_cfg(hdev, ifr);
+ case SIOCSHWTSTAMP:
+ return hclge_ptp_set_cfg(hdev, ifr);
+ default:
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
+ }
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
+static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
+ bool bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+ hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
+ bypass_en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
u8 fe_type, bool filter_en, u8 vf_id)
{
@@ -7753,89 +9444,139 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to get vlan filter config, ret = %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
return ret;
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->vlan_fe = filter_en ?
(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
- ret);
+ dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
+ vf_id, ret);
return ret;
}
-#define HCLGE_FILTER_TYPE_VF 0
-#define HCLGE_FILTER_TYPE_PORT 1
-#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
-#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
-#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
-#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
-#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
-#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
- | HCLGE_FILTER_FE_ROCE_EGRESS_B)
-#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
- | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ enable, vport->vport_id);
-static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable,
+ vport->vport_id);
+ if (ret)
+ return ret;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
+ ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
+ !enable);
+ } else if (!vport->vport_id) {
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ enable = false;
+
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS,
+ enable, 0);
+ }
+
+ return ret;
+}
+
+static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- if (hdev->pdev->revision >= 0x21) {
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS, enable, 0);
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, enable, 0);
- } else {
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B, enable,
- 0);
+ if (vport->vport_id) {
+ if (vport->port_base_vlan_cfg.state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return true;
+
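+		/* a trusted vf that requested unicast promisc does not
+		 * need the vlan filter enabled
+		 */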
+ if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
+ return false;
+ } else if (handle->netdev_flags & HNAE3_USER_UPE) {
+ return false;
}
- if (enable)
- handle->netdev_flags |= HNAE3_VLAN_FLTR;
- else
- handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
+
+ if (!vport->req_vlan_fltr_en)
+ return false;
+
+	/* for compatibility with older devices, always enable the vlan filter */
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ return true;
+
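+	/* only enable the vlan filter when a non-zero vlan id is configured */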
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+ if (vlan->vlan_id != 0)
+ return true;
+
+ return false;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan,
- __be16 proto)
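+/* update the requested vlan filter state and touch hardware only when
+ * the effective state changes
+ */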
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool need_en;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ vport->req_vlan_fltr_en = request_en;
+
+ need_en = hclge_need_enable_vport_vlan_filter(vport);
+ if (need_en == vport->cur_vlan_fltr_en) {
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ ret = hclge_set_vport_vlan_filter(vport, need_en);
+ if (ret) {
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+ }
+
+ vport->cur_vlan_fltr_en = need_en;
+
+ mutex_unlock(&hdev->vport_lock);
+
+ return 0;
+}
+
+static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_enable_vport_vlan_filter(vport, enable);
+}
+
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ struct hclge_desc *desc)
{
- struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
- struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
int ret;
- /* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter.
- * If spoof check is enable, and vf vlan is full, it shouldn't add
- * new vlan, because tx packets with these vlan id will be dropped.
- */
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
- if (vport->vf_info.spoofchk && vlan) {
- dev_err(&hdev->pdev->dev,
- "Can't add vlan due to spoof check is on and vf vlan table is full\n");
- return -EPERM;
- }
- return 0;
- }
-
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
vf_byte_off = vfid / 8;
vf_byte_val = 1 << (vfid % 8);
@@ -7859,12 +9600,22 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return ret;
}
+ return 0;
+}
+
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+ req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
- if (!req0->resp_code || req0->resp_code == 1)
+ if (!req->resp_code || req->resp_code == 1)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
@@ -7873,10 +9624,10 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
- if (!req0->resp_code)
+ if (!req->resp_code)
return 0;
/* vf vlan filter is disabled when vf vlan table is full,
@@ -7884,17 +9635,45 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
* Just return 0 without warning, avoid massive verbose
* print logs when unload.
*/
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
}
return -EIO;
}
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_desc desc[2];
+ int ret;
+
+	/* If the vf vlan table is full, firmware will close the vf vlan
+	 * filter, so it is useless and unnecessary to add a new vlan id to
+	 * the vf vlan filter. If spoof check is enabled and the vf vlan
+	 * table is full, a new vlan must not be added either, because tx
+	 * packets with these vlan ids would be dropped.
+	 */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+				"Can't add vlan: spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+ if (ret)
+ return ret;
+
+ return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
u16 vlan_id, bool is_kill)
{
@@ -7924,6 +9703,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
return ret;
}
+static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
+ u16 vlan_id, bool is_kill)
+{
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return false;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ return true;
+}
+
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_id, u16 vlan_id,
bool is_kill)
@@ -7934,8 +9739,10 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill && !vlan_id)
return 0;
- ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
- proto);
+ if (vlan_id >= VLAN_N_VID)
+ return -EINVAL;
+
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set %u vport vlan filter config fail, ret =%d.\n",
@@ -7943,26 +9750,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
- /* vlan 0 may be added twice when 8021q module is enabled */
- if (!is_kill && !vlan_id &&
- test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
return 0;
- if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %u is already in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
- if (is_kill &&
- !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %u is not in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
@@ -7999,6 +9789,8 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
vcfg->insert_tag1_en ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
+ vcfg->tag_shift_mode_en ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
@@ -8036,6 +9828,10 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
vcfg->vlan1_vlan_prionly ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
vcfg->vlan2_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
+ vcfg->strip_tag1_discard_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
+ vcfg->strip_tag2_discard_en ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
@@ -8054,7 +9850,7 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
u16 port_base_vlan_state,
- u16 vlan_tag)
+ u16 vlan_tag, u8 qos)
{
int ret;
@@ -8063,9 +9859,13 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
vport->txvlan_cfg.insert_tag1_en = false;
vport->txvlan_cfg.default_tag1 = 0;
} else {
- vport->txvlan_cfg.accept_tag1 = false;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
+
+ vport->txvlan_cfg.accept_tag1 =
+ ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
vport->txvlan_cfg.insert_tag1_en = true;
- vport->txvlan_cfg.default_tag1 = vlan_tag;
+ vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
+ vlan_tag;
}
vport->txvlan_cfg.accept_untag1 = true;
@@ -8078,16 +9878,21 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
vport->txvlan_cfg.accept_untag2 = true;
vport->txvlan_cfg.insert_tag2_en = false;
vport->txvlan_cfg.default_tag2 = 0;
+ vport->txvlan_cfg.tag_shift_mode_en = true;
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en =
vport->rxvlan_cfg.rx_vlan_offload_en;
+ vport->rxvlan_cfg.strip_tag2_discard_en = false;
} else {
vport->rxvlan_cfg.strip_tag1_en =
vport->rxvlan_cfg.rx_vlan_offload_en;
vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.strip_tag2_discard_en = true;
}
+
+ vport->rxvlan_cfg.strip_tag1_discard_en = false;
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
@@ -8139,66 +9944,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
return status;
}
-static int hclge_init_vlan_config(struct hclge_dev *hdev)
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
-#define HCLGE_DEF_VLAN_TYPE 0x8100
-
- struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- if (hdev->pdev->revision >= 0x21) {
- /* for revision 0x21, vf vlan filter is per function */
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- ret = hclge_set_vlan_filter_ctrl(hdev,
- HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS,
- true,
- vport->vport_id);
- if (ret)
- return ret;
- }
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true,
- 0);
- if (ret)
- return ret;
- } else {
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B,
- true, 0);
+ HCLGE_FILTER_FE_EGRESS, true,
+ vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
- handle->netdev_flags |= HNAE3_VLAN_FLTR;
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true, 0);
+}
- hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
- ret = hclge_set_vlan_protocol_type(hdev);
- if (ret)
- return ret;
+ return hclge_set_vlan_protocol_type(hdev);
+}
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- u16 vlan_tag;
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+ struct hclge_port_base_vlan_config *cfg;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ cfg = &vport->port_base_vlan_cfg;
- ret = hclge_vlan_offload_cfg(vport,
- vport->port_base_vlan_cfg.state,
- vlan_tag);
+ ret = hclge_vlan_offload_cfg(vport, cfg->state,
+ cfg->vlan_info.vlan_tag,
+ cfg->vlan_info.qos);
if (ret)
return ret;
}
+ return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ int ret;
+
+ ret = hclge_init_vlan_filter(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_type(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vport_vlan_offload(hdev);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -8206,16 +10025,29 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl)
{
- struct hclge_vport_vlan_cfg *vlan;
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ mutex_lock(&hdev->vport_lock);
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ mutex_unlock(&hdev->vport_lock);
+ return;
+ }
+ }
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
+ if (!vlan) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
+ mutex_unlock(&hdev->vport_lock);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
@@ -8224,6 +10056,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
int ret;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
@@ -8233,12 +10067,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
ret);
+
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
}
vlan->hd_tbl_status = true;
}
+ mutex_unlock(&hdev->vport_lock);
+
return 0;
}
@@ -8248,6 +10086,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->vlan_id == vlan_id) {
if (is_write_tbl && vlan->hd_tbl_status)
@@ -8262,6 +10102,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
break;
}
}
+
+ mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
@@ -8269,6 +10111,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->hd_tbl_status)
hclge_set_vlan_filter_hw(hdev,
@@ -8284,6 +10128,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
}
}
clear_bit(vport->vport_id, hdev->vf_vlan_full);
+ mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -8292,6 +10137,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
+ mutex_lock(&hdev->vport_lock);
+
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -8299,44 +10146,106 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
+
+ mutex_unlock(&hdev->vport_lock);
}
-static void hclge_restore_vlan_table(struct hnae3_handle *handle)
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_vport_vlan_cfg *vlan, *tmp;
- struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
u16 vlan_proto;
- u16 state, vlan_id;
- int i;
+ u16 vlan_id;
+ u16 state;
+ int vf_id;
+ int ret;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+	/* PF should restore the port based vlan of all VFs */
+ for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
+ vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
+ vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
+ &vport->port_base_vlan_cfg.vlan_info :
+ &vport->port_base_vlan_cfg.old_vlan_info;
+
+ vlan_id = vlan_info->vlan_tag;
+ vlan_proto = vlan_info->vlan_proto;
state = vport->port_base_vlan_cfg.state;
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id,
- false);
- continue;
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id,
+ vlan_id, false);
+ vport->port_base_vlan_cfg.tbl_sta = ret == 0;
}
+ }
+}
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- int ret;
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
- if (!vlan->hd_tbl_status)
- continue;
+ mutex_lock(&hdev->vport_lock);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, false);
if (ret)
break;
+ vlan->hd_tbl_status = true;
+ }
+ }
+
+ mutex_unlock(&hdev->vport_lock);
+}
+
+/* For global reset and imp reset, hardware will clear the mac table,
+ * so we change the mac address state from ACTIVE to TO_ADD so that they
+ * can be restored in the service task after the reset completes. Furthermore,
+ * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
+ * restored after reset, so just remove these mac nodes from mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_ADD;
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
}
}
}
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
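+/* restore the mac, vlan and flow director entries cleared by hardware reset */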
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_port_base_vlan_config(hdev);
+ hclge_restore_vport_vlan_table(vport);
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ hclge_restore_fd_entries(handle);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -8344,10 +10253,14 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.strip_tag2_discard_en = false;
} else {
vport->rxvlan_cfg.strip_tag1_en = enable;
vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.strip_tag2_discard_en = true;
}
+
+ vport->rxvlan_cfg.strip_tag1_discard_en = false;
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
vport->rxvlan_cfg.rx_vlan_offload_en = enable;
@@ -8355,6 +10268,14 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
+static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
+}
+
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
u16 port_base_vlan_state,
struct hclge_vlan_info *new_info,
@@ -8365,6 +10286,10 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
hclge_rm_vport_all_vlan_table(vport, false);
+ /* force clear VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter_hw(hdev,
htons(new_info->vlan_proto),
vport->vport_id,
@@ -8372,6 +10297,13 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
false);
}
+ vport->port_base_vlan_cfg.tbl_sta = false;
+
+ /* force add VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
+ if (ret)
+ return ret;
+
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
true);
@@ -8381,91 +10313,122 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
return hclge_add_vport_all_vlan_table(vport);
}
+static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
+ const struct hclge_vlan_info *old_cfg)
+{
+ if (new_cfg->vlan_tag != old_cfg->vlan_tag)
+ return true;
+
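+	/* for vlan 0, an update is needed when either the old or new qos is 0 */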
+ if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
+ return true;
+
+ return false;
+}
+
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ vport->port_base_vlan_cfg.tbl_sta = false;
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
- struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
- ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
+ vlan_info->qos);
if (ret)
return ret;
- if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
- /* add new VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(vlan_info->vlan_proto),
- vport->vport_id,
- vlan_info->vlan_tag,
- false);
- if (ret)
- return ret;
-
- /* remove old VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(old_vlan_info->vlan_proto),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret)
- return ret;
-
- goto update;
- }
+ if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
+ goto out;
- ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
- old_vlan_info);
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
if (ret)
return ret;
- /* update state only when disable/enable port based VLAN */
+out:
vport->port_base_vlan_cfg.state = state;
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
-update:
- vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
- vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
- vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+ vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
+ vport->port_base_vlan_cfg.vlan_info = *vlan_info;
+ vport->port_base_vlan_cfg.tbl_sta = true;
+ hclge_set_vport_vlan_fltr_change(vport);
return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
enum hnae3_port_base_vlan_state state,
- u16 vlan)
+ u16 vlan, u8 qos)
{
if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
- if (!vlan)
+ if (!vlan && !qos)
return HNAE3_PORT_BASE_VLAN_NOCHANGE;
- else
- return HNAE3_PORT_BASE_VLAN_ENABLE;
- } else {
- if (!vlan)
- return HNAE3_PORT_BASE_VLAN_DISABLE;
- else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
- return HNAE3_PORT_BASE_VLAN_NOCHANGE;
- else
- return HNAE3_PORT_BASE_VLAN_MODIFY;
+
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
}
+
+ if (!vlan && !qos)
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
+
+ if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
+ vport->port_base_vlan_cfg.vlan_info.qos == qos)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_vlan_info vlan_info;
u16 state;
int ret;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vfid);
@@ -8480,7 +10443,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
state = hclge_get_port_base_vlan_state(vport,
vport->port_base_vlan_cfg.state,
- vlan);
+ vlan, qos);
if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
return 0;
@@ -8488,16 +10451,27 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
vlan_info.qos = qos;
vlan_info.vlan_proto = ntohs(proto);
- if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
- return hclge_update_port_base_vlan_cfg(vport, state,
- &vlan_info);
- } else {
- ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
- vport->vport_id, state,
- vlan, qos,
- ntohs(proto));
+ ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to update port base vlan for vf %d, ret = %d\n",
+ vfid, ret);
return ret;
}
+
+	/* There is a time window before the PF knows the VF is not alive,
+	 * which may cause the mailbox send to fail. That doesn't matter:
+	 * the VF will query the state again when it reinitializes.
+	 * For DEVICE_VERSION_V3, the VF doesn't need to know about the
+	 * port based VLAN state.
+	 */
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
+ test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ state, &vlan_info);
+
+ return 0;
}
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
@@ -8530,11 +10504,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+	/* When the device is resetting or the reset failed, firmware is
+	 * unable to handle the mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -8552,11 +10527,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
}
if (!ret) {
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
+ if (!is_kill)
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
+ else if (is_kill && vlan_id != 0)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
} else if (is_kill) {
/* when remove hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to be consistent
@@ -8564,9 +10539,37 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
set_bit(vlan_id, vport->vlan_del_fail_bmap);
}
+
+ hclge_set_vport_vlan_fltr_change(vport);
+
return ret;
}
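+/* re-apply the vlan filter state for vports flagged as changed */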
+static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state))
+ continue;
+
+ ret = hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to sync vlan filter state for vport%u, ret = %d\n",
+ vport->vport_id, ret);
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ return;
+ }
+ }
+}
+
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT 60
@@ -8589,6 +10592,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ hclge_set_vport_vlan_fltr_change(vport);
sync_cnt++;
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
@@ -8598,6 +10602,8 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
VLAN_N_VID);
}
}
+
+ hclge_sync_vlan_fltr_state(hdev);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
@@ -8629,7 +10635,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
	/* HW support 2 layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
+ max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
return -EINVAL;
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
@@ -8647,6 +10653,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
	/* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+				"failed to set pf mtu: new frame size is less than the mps of vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
@@ -8674,7 +10683,7 @@ out:
return ret;
}
-static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
+static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
bool enable)
{
struct hclge_reset_tqp_queue_cmd *req;
@@ -8684,7 +10693,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
+ req->tqp_id = cpu_to_le16(queue_id);
if (enable)
hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
@@ -8698,7 +10707,8 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
return 0;
}
-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+ u8 *reset_status)
{
struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
@@ -8707,7 +10717,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
+ req->tqp_id = cpu_to_le16(queue_id);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -8716,101 +10726,134 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+ return 0;
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
+ struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
queue = handle->kinfo.tqp[queue_id];
- tqp = container_of(queue, struct hclge_tqp, q);
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
-int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int reset_try_times = 0;
- int reset_status;
+ u16 reset_try_times = 0;
+ u8 reset_status;
u16 queue_gid;
int ret;
+ u16 i;
- queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ queue_gid = hclge_covert_handle_qid_global(handle, i);
+ ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to send reset tqp cmd, ret = %d\n",
+ ret);
+ return ret;
+ }
- ret = hclge_tqp_enable(hdev, queue_id, 0, false);
- if (ret) {
- dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
- return ret;
- }
+ while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
+ ret = hclge_get_reset_status(hdev, queue_gid,
+ &reset_status);
+ if (ret)
+ return ret;
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Send reset tqp cmd fail, ret = %d\n", ret);
- return ret;
- }
+ if (reset_status)
+ break;
- while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
- if (reset_status)
- break;
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
+ }
- /* Wait for tqp hw reset */
- usleep_range(1000, 1200);
- }
+ if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "wait for tqp hw reset timeout\n");
+ return -ETIME;
+ }
- if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
- dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
- return ret;
+ ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to deassert soft reset, ret = %d\n",
+ ret);
+ return ret;
+ }
+ reset_try_times = 0;
}
-
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Deassert the soft reset fail, ret = %d\n", ret);
-
- return ret;
+ return 0;
}
-void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
+static int hclge_reset_rcb(struct hnae3_handle *handle)
{
+#define HCLGE_RESET_RCB_NOT_SUPPORT 0U
+#define HCLGE_RESET_RCB_SUCCESS 1U
+
+ struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int reset_try_times = 0;
- int reset_status;
+ struct hclge_reset_cmd *req;
+ struct hclge_desc desc;
+ u8 return_status;
u16 queue_gid;
int ret;
- queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ queue_gid = hclge_covert_handle_qid_global(handle, 0);
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
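+	/* one command resets all RCBs of this function, starting from
+	 * the first global queue id
+	 */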
+ req = (struct hclge_reset_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
+ hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
+ req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
+ req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Send reset tqp cmd fail, ret = %d\n", ret);
- return;
+ dev_err(&hdev->pdev->dev,
+ "failed to send rcb reset cmd, ret = %d\n", ret);
+ return ret;
}
- while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
- if (reset_status)
- break;
+ return_status = req->fun_reset_rcb_return_status;
+ if (return_status == HCLGE_RESET_RCB_SUCCESS)
+ return 0;
- /* Wait for tqp hw reset */
- usleep_range(1000, 1200);
+ if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
+ dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
+ return_status);
+ return -EIO;
}
- if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
- dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
- return;
+ /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
+ * again to reset all tqps
+ */
+ return hclge_reset_tqp_cmd(handle);
+}
+
+int hclge_reset_tqp(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* only need to disable PF's tqp */
+ if (!vport->vport_id) {
+ ret = hclge_tqp_enable(handle, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to disable tqp, ret = %d\n", ret);
+ return ret;
+ }
}
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Deassert the soft reset fail, ret = %d\n", ret);
+ return hclge_reset_rcb(handle);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
@@ -8883,9 +10926,10 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct phy_device *phydev = hdev->hw.mac.phydev;
+ u8 media_type = hdev->hw.mac.media_type;
- *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
+ *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
+ hclge_get_autoneg(handle) : 0;
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
*rx_en = 0;
@@ -8931,7 +10975,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
struct phy_device *phydev = hdev->hw.mac.phydev;
u32 fc_autoneg;
- if (phydev) {
+ if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
fc_autoneg = hclge_get_autoneg(handle);
if (auto_neg != fc_autoneg) {
dev_info(&hdev->pdev->dev,
@@ -8950,7 +10994,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_record_user_pauseparam(hdev, rx_en, tx_en);
- if (!auto_neg)
+ if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
if (phydev)
@@ -8960,7 +11004,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -8971,6 +11015,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -8979,6 +11025,12 @@ static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+	/* When the nic is down, the service task is not running and does not
+	 * update the port information every second. Query the port
+	 * information before returning the media type to ensure the media
+	 * information is correct.
+	 */
+ hclge_update_port_info(hdev);
+
if (media_type)
*media_type = hdev->hw.mac.media_type;
@@ -9046,7 +11098,6 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
- dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
@@ -9058,6 +11109,8 @@ static void hclge_info_show(struct hclge_dev *hdev)
hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ dev_info(dev, "Default tx spare buffer size: %u\n",
+ hdev->tx_spare_buf_size);
dev_info(dev, "PF info end.\n");
}
@@ -9109,8 +11162,8 @@ init_nic_err:
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
struct hclge_vport *vport)
{
- struct hnae3_client *client = vport->roce.client;
struct hclge_dev *hdev = ae_dev->priv;
+ struct hnae3_client *client;
int rst_cnt;
int ret;
@@ -9161,39 +11214,35 @@ static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
- struct hclge_vport *vport;
- int i, ret;
-
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- vport = &hdev->vport[i];
+ struct hclge_vport *vport = &hdev->vport[0];
+ int ret;
- switch (client->type) {
- case HNAE3_CLIENT_KNIC:
- hdev->nic_client = client;
- vport->nic.client = client;
- ret = hclge_init_nic_client_instance(ae_dev, vport);
- if (ret)
- goto clear_nic;
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ vport->nic.client = client;
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_nic;
- ret = hclge_init_roce_client_instance(ae_dev, vport);
- if (ret)
- goto clear_roce;
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
- break;
- case HNAE3_CLIENT_ROCE:
- if (hnae3_dev_roce_supported(hdev)) {
- hdev->roce_client = client;
- vport->roce.client = client;
- }
+ break;
+ case HNAE3_CLIENT_ROCE:
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ vport->roce.client = client;
+ }
- ret = hclge_init_roce_client_instance(ae_dev, vport);
- if (ret)
- goto clear_roce;
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
- break;
- default:
- return -EINVAL;
- }
+ break;
+ default:
+ return -EINVAL;
}
return 0;
@@ -9212,35 +11261,51 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
- struct hclge_vport *vport;
- int i;
+ struct hclge_vport *vport = &hdev->vport[0];
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- vport = &hdev->vport[i];
- if (hdev->roce_client) {
- clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- msleep(HCLGE_WAIT_RESET_DONE);
-
- hdev->roce_client->ops->uninit_instance(&vport->roce,
- 0);
- hdev->roce_client = NULL;
- vport->roce.client = NULL;
- }
- if (client->type == HNAE3_CLIENT_ROCE)
- return;
- if (hdev->nic_client && client->ops->uninit_instance) {
- clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- msleep(HCLGE_WAIT_RESET_DONE);
-
- client->ops->uninit_instance(&vport->nic, 0);
- hdev->nic_client = NULL;
- vport->nic.client = NULL;
- }
+ if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ }
+ if (client->type == HNAE3_CLIENT_ROCE)
+ return;
+ if (hdev->nic_client && client->ops->uninit_instance) {
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
}
}
+static int hclge_dev_mem_map(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_hw *hw = &hdev->hw;
+
+	/* if the device does not have device memory, return directly */
+ if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
+ return 0;
+
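+	/* map the device memory BAR with write combining */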
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGE_MEM_BAR),
+ pci_resource_len(pdev, HCLGE_MEM_BAR));
+ if (!hw->hw.mem_base) {
+ dev_err(&pdev->dev, "failed to map device memory\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static int hclge_pci_init(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -9272,16 +11337,23 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->io_base = pcim_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pcim_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM;
goto err_clr_master;
}
+ ret = hclge_dev_mem_map(hdev);
+ if (ret)
+ goto err_unmap_io_base;
+
hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
return 0;
+
+err_unmap_io_base:
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -9295,7 +11367,10 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- pcim_iounmap(pdev, hdev->hw.io_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
+
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
@@ -9324,42 +11399,41 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_delayed_work_sync(&hdev->service_task);
}
-static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
{
-#define HCLGE_FLR_RETRY_WAIT_MS 500
-#define HCLGE_FLR_RETRY_CNT 5
+#define HCLGE_RESET_RETRY_WAIT_MS 500
+#define HCLGE_RESET_RETRY_CNT 5
struct hclge_dev *hdev = ae_dev->priv;
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = HNAE3_FLR_RESET;
- ret = hclge_reset_prepare(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGE_FLR_RETRY_WAIT_MS);
- goto retry;
- }
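+	/* retry preparing for reset when it fails or another reset is pending */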
+ while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclge_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_RESET_RETRY_WAIT_MS);
}
- /* disable misc vector before FLR done */
+ /* disable misc vector before reset done */
hclge_enable_vector(&hdev->misc_vector, false);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- hdev->rst_stats.flr_rst_cnt++;
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ hdev->rst_stats.flr_rst_cnt++;
}
-static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
int ret;
@@ -9383,15 +11457,49 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
struct hclge_vport *vport = &hdev->vport[i];
int ret;
- /* Send cmd to clear VF's FUNC_RST_ING */
+ /* Send cmd to clear vport's FUNC_RST_ING */
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%u) rst failed %d!\n",
+ "clear vport(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	/* This new command is only supported by new firmware; it will
+	 * fail with older firmware. The error value -EOPNOTSUPP can only be
+	 * returned by older firmware running this command, so to keep the
+	 * code backward compatible we override this value and return
+	 * success.
+	 */
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(&hdev->pdev->dev,
+ "failed to clear hw resource, ret = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
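+/* enable the advanced RX descriptor layout when the device supports it */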
+static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
+}
+
+static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -9399,10 +11507,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
int ret;
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
- if (!hdev) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!hdev)
+ return -ENOMEM;
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
@@ -9421,13 +11527,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
- /* Firmware command queue initialize */
- ret = hclge_cmd_queue_init(hdev);
+ ret = hclge_devlink_init(hdev);
if (ret)
goto err_pci_uninit;
+ /* Firmware command queue initialize */
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ if (ret)
+ goto err_devlink_uninit;
+
/* Firmware command initialize */
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
+ if (ret)
+ goto err_cmd_uninit;
+
+ ret = hclge_clear_hw_resource(hdev);
if (ret)
goto err_cmd_uninit;
@@ -9435,6 +11550,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_cmd_uninit;
+ ret = hclge_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
+ ret);
+ goto err_cmd_uninit;
+ }
+
ret = hclge_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
@@ -9465,7 +11587,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_msi_irq_uninit;
- if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
+ !hnae3_dev_phy_imp_supported(hdev)) {
ret = hclge_mac_mdio_config(hdev);
if (ret)
goto err_msi_irq_uninit;
@@ -9487,7 +11610,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_config_gro(hdev, true);
+ ret = hclge_config_gro(hdev);
if (ret)
goto err_mdiobus_unreg;
@@ -9503,7 +11626,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- hclge_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
@@ -9523,6 +11652,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -9530,16 +11667,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
- /* Setup affinity after service timer setup because add_timer_on
- * is called in affinity notify.
- */
- hclge_misc_affinity_setup(hdev);
-
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
/* Log and clear the hw errors those already occurred */
- hclge_handle_all_hns_hw_errors(ae_dev);
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
/* request delayed reset for the error recovery because an immediate
* global reset on a PF affecting pending initialization of other PFs
@@ -9553,6 +11688,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
}
+ hclge_init_rxd_adv_layout(hdev);
+
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@@ -9574,19 +11711,23 @@ err_msi_irq_uninit:
err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+err_devlink_uninit:
+ hclge_devlink_uninit(hdev);
err_pci_uninit:
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
out:
+ mutex_destroy(&hdev->vport_lock);
return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -9631,7 +11772,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
u32 new_spoofchk = enable ? 1 : 0;
int ret;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vf);
@@ -9645,7 +11786,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
dev_warn(&hdev->pdev->dev,
"vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
vf);
- else if (enable && hclge_is_umv_space_full(vport))
+ else if (enable && hclge_is_umv_space_full(vport, true))
dev_warn(&hdev->pdev->dev,
"vf %d mac table is full, enable spoof check may cause its packet send fail\n",
vf);
@@ -9664,7 +11805,7 @@ static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
int ret;
int i;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0;
/* resume the vf spoof check state after reset */
@@ -9685,8 +11826,6 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 new_trusted = enable ? 1 : 0;
- bool en_bc_pmc;
- int ret;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
@@ -9695,18 +11834,9 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
if (vport->vf_info.trusted == new_trusted)
return 0;
- /* Disable promisc mode for VF if it is not trusted any more. */
- if (!enable && vport->vf_info.promisc_enable) {
- en_bc_pmc = hdev->pdev->revision != 0x20;
- ret = hclge_set_vport_promisc_mode(vport, false, false,
- en_bc_pmc);
- if (ret)
- return ret;
- vport->vf_info.promisc_enable = 0;
- hclge_inform_vf_promisc_info(vport);
- }
-
vport->vf_info.trusted = new_trusted;
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
return 0;
}
@@ -9729,7 +11859,7 @@ static void hclge_reset_vf_rate(struct hclge_dev *hdev)
}
}
-static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
@@ -9750,7 +11880,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
struct hclge_dev *hdev = vport->back;
int ret;
- ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
if (ret)
return ret;
@@ -9822,10 +11952,19 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev);
- memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
- memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ /* NOTE: a PF reset does not need to clear or restore the PF and VF table
+ * entries, so the tables in memory should not be cleaned here.
+ */
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
+ hclge_reset_umv_space(hdev);
+ }
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed\n");
return ret;
@@ -9837,21 +11976,26 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- hclge_reset_umv_space(hdev);
-
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
return ret;
}
+ ret = hclge_tp_port_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
+ ret);
+ return ret;
+ }
+
ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
if (ret) {
dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
return ret;
}
- ret = hclge_config_gro(hdev, true);
+ ret = hclge_config_gro(hdev);
if (ret)
return ret;
@@ -9886,8 +12030,15 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ return ret;
+
 /* Log and clear the hw errors that have already occurred */
- hclge_handle_all_hns_hw_errors(ae_dev);
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
/* Re-enable the hw error interrupts because
* the interrupts get disabled on global reset.
@@ -9919,6 +12070,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
return ret;
+ hclge_init_rxd_adv_layout(hdev);
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -9932,14 +12085,15 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_vf_rate(hdev);
hclge_clear_vf_vlan(hdev);
- hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
+ hclge_ptp_uninit(hdev);
+ hclge_uninit_rxd_adv_layout(hdev);
+ hclge_uninit_mac_table(hdev);
+ hclge_del_all_fd_entries(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
- hclge_uninit_umv_space(hdev);
-
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
@@ -9949,23 +12103,21 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_config_nic_hw_error(hdev, false);
hclge_config_rocee_ras_interrupt(hdev, false);
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclge_misc_irq_uninit(hdev);
+ hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
- mutex_destroy(&hdev->vport_lock);
- hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return min_t(u32, hdev->rss_size_max,
- vport->alloc_tqps / kinfo->num_tc);
+ return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
@@ -9984,34 +12136,20 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
*alloc_tqps = vport->alloc_tqps;
- *max_rss_size = hdev->rss_size_max;
+ *max_rss_size = hdev->pf_rss_size_max;
}
-static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
- bool rxfh_configured)
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
- u16 cur_rss_size = kinfo->rss_size;
- u16 cur_tqps = kinfo->num_tqps;
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 roundup_size;
- u32 *rss_indir;
unsigned int i;
- int ret;
- kinfo->req_rss_size = new_tqps_num;
-
- ret = hclge_tm_vport_map_update(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
- return ret;
- }
-
- roundup_size = roundup_pow_of_two(kinfo->rss_size);
+ roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
roundup_size = ilog2(roundup_size);
/* Set the RSS TC mode according to the new RSS size */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
@@ -10022,22 +12160,49 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
tc_valid[i] = 1;
tc_size[i] = roundup_size;
- tc_offset[i] = kinfo->rss_size * i;
+ tc_offset[i] = vport->nic.kinfo.rss_size * i;
+ }
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
+ u32 *rss_indir;
+ unsigned int i;
+ int ret;
+
+ kinfo->req_rss_size = new_tqps_num;
+
+ ret = hclge_tm_vport_map_update(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
+ return ret;
}
- ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+
+ ret = hclge_set_rss_tc_mode_cfg(handle);
if (ret)
return ret;
- /* RSS indirection table has been configuared by user */
+ /* RSS indirection table has been configured by user */
if (rxfh_configured)
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
+ GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
ret = hclge_set_rss(handle, rss_indir, NULL, 0);
@@ -10052,7 +12217,7 @@ out:
dev_info(&hdev->pdev->dev,
"Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size,
- cur_tqps, kinfo->rss_size * kinfo->num_tc);
+ cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
return ret;
}
@@ -10196,20 +12361,22 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE 1
#define REG_NUM_REMAIN_MASK 3
-#define BD_LIST_MAX_NUM 30
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- /*prepare 4 commands to query DFX BD number*/
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
- desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+ int i;
- return hclge_cmd_send(&hdev->hw, desc, 4);
+ /* initialize command BD except the last one */
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ /* initialize the last command BD */
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
@@ -10247,7 +12414,7 @@ static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(desc, cmd, true);
for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -10286,15 +12453,20 @@ static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int data_len_per_desc, data_len, bd_num, i;
- int bd_num_list[BD_LIST_MAX_NUM];
+ int data_len_per_desc, bd_num, i;
+ int *bd_num_list;
+ u32 data_len;
int ret;
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get dfx reg bd num fail, status is %d.\n", ret);
- return ret;
+ goto out;
}
data_len_per_desc = sizeof_field(struct hclge_desc, data);
@@ -10305,6 +12477,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
}
+out:
+ kfree(bd_num_list);
return ret;
}
@@ -10312,16 +12486,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
int bd_num, bd_num_max, buf_len, i;
- int bd_num_list[BD_LIST_MAX_NUM];
struct hclge_desc *desc_src;
+ int *bd_num_list;
u32 *reg = data;
int ret;
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get dfx reg bd num fail, status is %d.\n", ret);
- return ret;
+ goto out;
}
bd_num_max = bd_num_list[0];
@@ -10330,8 +12508,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
buf_len = sizeof(*desc_src) * bd_num_max;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src)
- return -ENOMEM;
+ if (!desc_src) {
+ ret = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < dfx_reg_type_num; i++) {
bd_num = bd_num_list[i];
@@ -10347,6 +12527,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
}
kfree(desc_src);
+out:
+ kfree(bd_num_list);
return ret;
}
@@ -10558,15 +12740,269 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ bool gro_en_old = hdev->gro_en;
+ int ret;
+
+ hdev->gro_en = enable;
+ ret = hclge_config_gro(hdev);
+ if (ret)
+ hdev->gro_en = gro_en_old;
+
+ return ret;
+}
+
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+ u8 tmp_flags;
+ int ret;
+ u16 i;
+
+ if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ vport->last_promisc_flags = vport->overflow_promisc_flags;
+ }
+
+ if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
+ tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+ ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+ tmp_flags & HNAE3_MPE);
+ if (!ret) {
+ clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ }
+ }
+
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ bool uc_en = false;
+ bool mc_en = false;
+ bool bc_en;
+
+ vport = &hdev->vport[i];
+
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state))
+ continue;
+
+ if (vport->vf_info.trusted) {
+ uc_en = vport->vf_info.request_uc_en > 0 ||
+ vport->overflow_promisc_flags &
+ HNAE3_OVERFLOW_UPE;
+ mc_en = vport->vf_info.request_mc_en > 0 ||
+ vport->overflow_promisc_flags &
+ HNAE3_OVERFLOW_MPE;
+ }
+ bc_en = vport->vf_info.request_bc_en > 0;
+
+ ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+ mc_en, bc_en);
+ if (ret) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ return;
+ }
+ hclge_set_vport_vlan_fltr_change(vport);
+ }
+}
+
+static bool hclge_module_existed(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u32 existed;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP exist state, ret = %d\n", ret);
+ return false;
+ }
+
+ existed = le32_to_cpu(desc.data[0]);
+
+ return existed != 0;
+}
+
+/* Each read needs 6 BDs (140 bytes in total).
+ * Return the number of bytes actually read; 0 means the read failed.
+ */
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
+ u16 read_len;
+ u16 copy_len;
+ int ret;
+ int i;
+
+ /* setup all 6 bds to read module eeprom info. */
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
+ true);
+
+ /* bd0~bd4 need next flag */
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ /* setup bd0, this bd contains offset and read length. */
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP eeprom info, ret = %d\n", ret);
+ return 0;
+ }
+
+ /* copy sfp info from bd0 to out buffer. */
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ return read_len;
+
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
+ return read_len;
+}
+
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 read_len = 0;
+ u16 data_len;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ if (!hclge_module_existed(hdev))
+ return -ENXIO;
+
+ while (read_len < len) {
+ data_len = hclge_get_sfp_eeprom_info(hdev,
+ offset + read_len,
+ len - read_len,
+ data + read_len);
+ if (!data_len)
+ return -EIO;
+
+ read_len += data_len;
+ }
+
+ return 0;
+}
+
+static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
+ u32 *status_code)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query link diagnosis info, ret = %d\n", ret);
+ return ret;
+ }
+
+ *status_code = le32_to_cpu(desc.data[0]);
+ return 0;
+}
+
+/* After SRIOV is disabled, the VF still has some configuration and info
+ * left over that was configured by the PF and needs to be cleaned up.
+ */
+static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ int ret;
+
+ /* after disable sriov, clean VF rate configured by PF */
+ ret = hclge_tm_qs_shaper_cfg(vport, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d rate config, ret = %d\n",
+ vfid, ret);
+
+ vlan_info.vlan_tag = 0;
+ vlan_info.qos = 0;
+ vlan_info.vlan_proto = ETH_P_8021Q;
+ ret = hclge_update_port_base_vlan_cfg(vport,
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ &vlan_info);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d port base vlan, ret = %d\n",
+ vfid, ret);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d spoof config, ret = %d\n",
+ vfid, ret);
- return hclge_config_gro(hdev, enable);
+ memset(&vport->vf_info, 0, sizeof(vport->vf_info));
+}
+
+static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ hclge_clear_vport_vf_info(vport, i);
+ }
+}
+
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = vport->nic.kinfo.tc_map_mode;
+ if (priority)
+ *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ vport->nic.kinfo.dscp_prio[dscp];
+
+ return 0;
}
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
- .flr_prepare = hclge_flr_prepare,
- .flr_done = hclge_flr_done,
+ .reset_prepare = hclge_reset_prepare_general,
+ .reset_done = hclge_reset_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -10574,6 +13010,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_vector = hclge_get_vector,
.put_vector = hclge_put_vector,
.set_promisc_mode = hclge_set_promisc_mode,
+ .request_update_promisc_mode = hclge_request_update_promisc_mode,
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
@@ -10584,10 +13021,10 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
- .get_rss_key_size = hclge_get_rss_key_size,
- .get_rss_indir_size = hclge_get_rss_indir_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
@@ -10631,14 +13068,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_link_mode = hclge_get_link_mode,
.add_fd_entry = hclge_add_fd_entry,
.del_fd_entry = hclge_del_fd_entry,
- .del_all_fd_entries = hclge_del_all_fd_entries,
.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
.get_fd_rule_info = hclge_get_fd_rule_info,
.get_fd_all_rules = hclge_get_all_rules,
- .restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_run_cmd = hclge_dbg_run_cmd,
+ .dbg_read_cmd = hclge_dbg_read_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -10648,13 +13083,25 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
- .restore_vlan_table = hclge_restore_vlan_table,
.get_vf_config = hclge_get_vf_config,
.set_vf_link_state = hclge_set_vf_link_state,
.set_vf_spoofchk = hclge_set_vf_spoofchk,
.set_vf_trust = hclge_set_vf_trust,
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
+ .get_module_eeprom = hclge_get_module_eeprom,
+ .get_cmdq_stat = hclge_get_cmdq_stat,
+ .add_cls_flower = hclge_add_cls_flower,
+ .del_cls_flower = hclge_del_cls_flower,
+ .cls_flower_active = hclge_is_cls_flower_active,
+ .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
+ .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
+ .set_tx_hwts_info = hclge_ptp_set_tx_info,
+ .get_rx_hwts = hclge_ptp_get_rx_hwts,
+ .get_ts_info = hclge_ptp_get_ts_info,
+ .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
+ .clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
};
static struct hnae3_ae_algo ae_algo = {
@@ -10662,11 +13109,11 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
@@ -10677,8 +13124,9 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
+ hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index f78cbb4cc85e..495b639b0dc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,15 +8,21 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+#include <net/devlink.h>
#include "hclge_cmd.h"
+#include "hclge_ptp.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_MAX_PF_NUM 8
+#define HCLGE_VF_VPORT_START_NUM 1
+
#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4
@@ -27,31 +33,18 @@
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
#define HCLGE_VECTOR_REG_BASE 0x20000
+#define HCLGE_VECTOR_EXT_REG_BASE 0x30000
#define HCLGE_MISC_VECTOR_REG_BASE 0x20400
#define HCLGE_VECTOR_REG_OFFSET 0x4
+#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000
-#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
-#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
-#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
-#define HCLGE_CMDQ_TX_TAIL_REG 0x27010
-#define HCLGE_CMDQ_TX_HEAD_REG 0x27014
-#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
-#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
-#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
-#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
-#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
-#define HCLGE_CMDQ_INTR_SRC_REG 0x27100
-#define HCLGE_CMDQ_INTR_STS_REG 0x27104
-#define HCLGE_CMDQ_INTR_EN_REG 0x27108
-#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
+#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
/* bar registers for common func */
-#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
-#define HCLGE_RAS_OTHER_STS_REG 0x20B00
-#define HCLGE_FUNC_RESET_STS_REG 0x20C00
#define HCLGE_GRO_EN_REG 0x28000
+#define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008
/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG 0x80000
@@ -89,22 +82,6 @@
#define HCLGE_TQP_INTR_RL_REG 0x20900
#define HCLGE_RSS_IND_TBL_SIZE 512
-#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
-#define HCLGE_RSS_KEY_SIZE 40
-#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
-#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
-#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
-#define HCLGE_RSS_CFG_TBL_NUM \
- (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
-
-#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
-#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
-#define HCLGE_D_PORT_BIT BIT(0)
-#define HCLGE_S_PORT_BIT BIT(1)
-#define HCLGE_D_IP_BIT BIT(2)
-#define HCLGE_S_IP_BIT BIT(3)
-#define HCLGE_V_TAG_BIT BIT(4)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
@@ -144,6 +121,10 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_MAX_QSET_NUM 1024
+
+#define HCLGE_DBG_RESET_INFO_LEN 1024
+
enum HLCGE_PORT_TYPE {
HOST_PORT,
NETWORK_PORT
@@ -172,6 +153,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_FUN_RST_ING_B 0
/* Vector0 register bits define */
+#define HCLGE_VECTOR0_REG_PTP_INT_B 0
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
@@ -184,6 +166,16 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
+#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
+#define HCLGE_TRIGGER_IMP_RESET_B 7U
+
+#define HCLGE_TQP_MEM_SIZE 0x10000
+#define HCLGE_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for NIC */
+#define HCLGE_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1)
+#define HCLGE_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i))
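A minimal sketch of how these macros resolve a TQP's memory region (editorial illustration; the helper name and the 8 MiB BAR length are assumptions, not part of the patch):

	/* hypothetical helper: map the i-th TQP's memory, assuming BAR 4 has
	 * already been ioremapped to mem_base; with an 8 MiB BAR this yields
	 * 0x400000 + i * 0x10000.
	 */
	static void __iomem *example_tqp_mem(struct hclge_dev *hdev,
					     void __iomem *mem_base, int i)
	{
		return mem_base + HCLGE_TQP_MEM_OFFSET(hdev, i);
	}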
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -199,6 +191,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
+#define HCLGE_SUPPORT_200G_BIT BIT(8)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -214,10 +207,16 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
+ HCLGE_STATE_ERR_SERVICE_SCHED,
HCLGE_STATE_STATISTICS_UPDATING,
- HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
HCLGE_STATE_RST_FAIL,
+ HCLGE_STATE_FD_TBL_CHANGED,
+ HCLGE_STATE_FD_CLEAR_ALL,
+ HCLGE_STATE_FD_USER_DEF_CHANGED,
+ HCLGE_STATE_PTP_EN,
+ HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
HCLGE_STATE_MAX
};
@@ -225,6 +224,7 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_RST,
HCLGE_VECTOR0_EVENT_MBX,
HCLGE_VECTOR0_EVENT_ERR,
+ HCLGE_VECTOR0_EVENT_PTP,
HCLGE_VECTOR0_EVENT_OTHER,
};
@@ -237,7 +237,8 @@ enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
- HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */
};
enum HCLGE_MAC_DUPLEX {
@@ -249,6 +250,7 @@ enum HCLGE_MAC_DUPLEX {
#define QUERY_ACTIVE_SPEED 1
struct hclge_mac {
+ u8 mac_id;
u8 phy_addr;
u8 flag;
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
@@ -257,6 +259,7 @@ struct hclge_mac {
u8 duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
u32 speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
@@ -264,7 +267,7 @@ struct hclge_mac {
u32 fec_mode; /* active fec mode */
u32 user_fec_mode;
u32 fec_ability;
- int link; /* store the link status of mac & phy (if phy exit) */
+ int link; /* store the link status of mac & phy (if phy exists) */
struct phy_device *phydev;
struct mii_bus *mdio_bus;
phy_interface_t phy_if;
@@ -273,30 +276,9 @@ struct hclge_mac {
};
struct hclge_hw {
- void __iomem *io_base;
+ struct hclge_comm_hw hw;
struct hclge_mac mac;
int num_vec;
- struct hclge_cmq cmq;
-};
-
-/* TQP stats */
-struct hlcge_tqp_stats {
- /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */
- u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */
- u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
-};
-
-struct hclge_tqp {
- /* copy of device pointer from pci_dev,
- * used when perform DMA mapping
- */
- struct device *dev;
- struct hnae3_queue q;
- struct hlcge_tqp_stats tqp_stats;
- u16 index; /* Global index in a NIC controller */
-
- bool alloced;
};
enum hclge_fc_mode {
@@ -308,6 +290,22 @@ enum hclge_fc_mode {
HCLGE_FC_DEFAULT
};
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+
+enum hclge_vlan_fltr_cap {
+ HCLGE_VLAN_FLTR_DEF,
+ HCLGE_VLAN_FLTR_CAN_MDF,
+};
enum hclge_link_fail_code {
HCLGE_LF_NORMAL,
HCLGE_LF_REF_CLOCK_LOST,
@@ -315,6 +313,9 @@ enum hclge_link_fail_code {
HCLGE_LF_XSFP_ABSENT,
};
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
@@ -334,17 +335,19 @@ struct hclge_tc_info {
};
struct hclge_cfg {
- u8 vmdq_vport_num;
u8 tc_num;
+ u8 vlan_fliter_cap;
u16 tqp_desc_num;
u16 rx_buf_len;
- u16 rss_size_max;
+ u16 vf_rss_size_max;
+ u16 pf_rss_size_max;
u8 phy_addr;
u8 media_type;
u8 mac_addr[ETH_ALEN];
u8 default_speed;
u32 numa_node_map;
- u8 speed_ability;
+ u32 tx_spare_buf_size;
+ u16 speed_ability;
u16 umv_space;
};
@@ -360,8 +363,13 @@ struct hclge_tm_info {
u8 pfc_en; /* PFC enabled or not for user priority */
};
+/* max number of mac statistics on each version */
+#define HCLGE_MAC_STATS_MAX_NUM_V1 87
+#define HCLGE_MAC_STATS_MAX_NUM_V2 105
+
struct hclge_comm_stats_str {
char desc[ETH_GSTRING_LEN];
+ u32 stats_num;
unsigned long offset;
};
@@ -369,6 +377,7 @@ struct hclge_comm_stats_str {
struct hclge_mac_stats {
u64 mac_tx_mac_pause_num;
u64 mac_rx_mac_pause_num;
+ u64 rsv0;
u64 mac_tx_pfc_pri0_pkt_num;
u64 mac_tx_pfc_pri1_pkt_num;
u64 mac_tx_pfc_pri2_pkt_num;
@@ -405,7 +414,7 @@ struct hclge_mac_stats {
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 rsv0;
+ u64 rsv1;
u64 mac_tx_8192_9216_oct_pkt_num;
u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
@@ -432,7 +441,7 @@ struct hclge_mac_stats {
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 rsv1;
+ u64 rsv2;
u64 mac_rx_8192_9216_oct_pkt_num;
u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
@@ -455,10 +464,52 @@ struct hclge_mac_stats {
u64 mac_rx_pfc_pause_pkt_num;
u64 mac_tx_ctrl_pkt_num;
u64 mac_rx_ctrl_pkt_num;
+
+ /* duration of pfc */
+ u64 mac_tx_pfc_pri0_xoff_time;
+ u64 mac_tx_pfc_pri1_xoff_time;
+ u64 mac_tx_pfc_pri2_xoff_time;
+ u64 mac_tx_pfc_pri3_xoff_time;
+ u64 mac_tx_pfc_pri4_xoff_time;
+ u64 mac_tx_pfc_pri5_xoff_time;
+ u64 mac_tx_pfc_pri6_xoff_time;
+ u64 mac_tx_pfc_pri7_xoff_time;
+ u64 mac_rx_pfc_pri0_xoff_time;
+ u64 mac_rx_pfc_pri1_xoff_time;
+ u64 mac_rx_pfc_pri2_xoff_time;
+ u64 mac_rx_pfc_pri3_xoff_time;
+ u64 mac_rx_pfc_pri4_xoff_time;
+ u64 mac_rx_pfc_pri5_xoff_time;
+ u64 mac_rx_pfc_pri6_xoff_time;
+ u64 mac_rx_pfc_pri7_xoff_time;
+
+ /* duration of pause */
+ u64 mac_tx_pause_xoff_time;
+ u64 mac_rx_pause_xoff_time;
};
#define HCLGE_STATS_TIMER_INTERVAL 300UL
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
@@ -524,6 +575,9 @@ enum HCLGE_FD_TUPLE {
MAX_TUPLE,
};
+#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \
+ (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV))
+
enum HCLGE_FD_META_DATA {
PACKET_TYPE_ID,
IP_FRAGEMENT,
@@ -536,9 +590,21 @@ enum HCLGE_FD_META_DATA {
MAX_META_DATA,
};
+enum HCLGE_FD_KEY_OPT {
+ KEY_OPT_U8,
+ KEY_OPT_LE16,
+ KEY_OPT_LE32,
+ KEY_OPT_MAC,
+ KEY_OPT_IP,
+ KEY_OPT_VNI,
+};
+
struct key_info {
u8 key_type;
u8 key_length; /* use bit as unit */
+ enum HCLGE_FD_KEY_OPT key_opt;
+ int offset;
+ int moffset;
};
#define MAX_KEY_LENGTH 400
@@ -546,14 +612,25 @@ struct key_info {
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
+#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000
+#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0)
+
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
+#define hclge_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclge_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
HCLGE_FD_ARFS_ACTIVE,
HCLGE_FD_EP_ACTIVE,
+ HCLGE_FD_TC_FLOWER_ACTIVE,
};
enum HCLGE_FD_PACKET_TYPE {
@@ -562,8 +639,36 @@ enum HCLGE_FD_PACKET_TYPE {
};
enum HCLGE_FD_ACTION {
- HCLGE_FD_ACTION_ACCEPT_PACKET,
+ HCLGE_FD_ACTION_SELECT_QUEUE,
HCLGE_FD_ACTION_DROP_PACKET,
+ HCLGE_FD_ACTION_SELECT_TC,
+};
+
+enum HCLGE_FD_NODE_STATE {
+ HCLGE_FD_TO_ADD,
+ HCLGE_FD_TO_DEL,
+ HCLGE_FD_ACTIVE,
+ HCLGE_FD_DELETED,
+};
+
+enum HCLGE_FD_USER_DEF_LAYER {
+ HCLGE_FD_USER_DEF_NONE,
+ HCLGE_FD_USER_DEF_L2,
+ HCLGE_FD_USER_DEF_L3,
+ HCLGE_FD_USER_DEF_L4,
+};
+
+#define HCLGE_FD_USER_DEF_LAYER_NUM 3
+struct hclge_fd_user_def_cfg {
+ u16 ref_cnt;
+ u16 offset;
+};
+
+struct hclge_fd_user_def_info {
+ enum HCLGE_FD_USER_DEF_LAYER layer;
+ u16 data;
+ u16 data_mask;
+ u16 offset;
};
struct hclge_fd_key_cfg {
@@ -579,10 +684,10 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
u16 max_key_length; /* use bit as unit */
- u32 proto_support;
u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
+ struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM];
};
#define IPV4_INDEX 3
@@ -599,6 +704,9 @@ struct hclge_fd_rule_tuples {
u16 dst_port;
u16 vlan_tag1;
u16 ether_proto;
+ u16 l2_user_def;
+ u16 l3_user_def;
+ u32 l4_user_def;
u8 ip_tos;
u8 ip_proto;
};
@@ -609,12 +717,24 @@ struct hclge_fd_rule {
struct hclge_fd_rule_tuples tuples_mask;
u32 unused_tuple;
u32 flow_type;
- u8 action;
- u16 vf_id;
+ union {
+ struct {
+ unsigned long cookie;
+ u8 tc;
+ } cls_flower;
+ struct {
+ u16 flow_id; /* only used for arfs */
+ } arfs;
+ struct {
+ struct hclge_fd_user_def_info user_def;
+ } ep;
+ };
u16 queue_id;
+ u16 vf_id;
u16 location;
- u16 flow_id; /* only used for arfs */
enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
+ enum HCLGE_FD_NODE_STATE state;
+ u8 action;
};
struct hclge_fd_ad_data {
@@ -628,11 +748,19 @@ struct hclge_fd_ad_data {
u8 write_rule_id_to_bd;
u8 next_input_key;
u16 rule_id;
+ u16 tc_size;
+ u8 override_tc;
};
-struct hclge_vport_mac_addr_cfg {
+enum HCLGE_MAC_NODE_STATE {
+ HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ACTIVE
+};
+
+struct hclge_mac_node {
struct list_head node;
- int hd_tbl_status;
+ enum HCLGE_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN];
};
@@ -671,9 +799,14 @@ struct hclge_mac_tnl_stats {
struct hclge_vf_vlan_cfg {
u8 mbx_cmd;
u8 subcode;
- u8 is_kill;
- u16 vlan;
- u16 proto;
+ union {
+ struct {
+ u8 is_kill;
+ __le16 vlan;
+ __le16 proto;
+ };
+ u8 enable;
+ };
};
#pragma pack()
@@ -696,7 +829,7 @@ struct hclge_vf_vlan_cfg {
* x = (~k) & v
* y = (k ^ ~v) & k
*/
-#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_x(x, k, v) (x = ~(k) & (v))
#define calc_y(y, k, v) \
do { \
const typeof(k) _k_ = (k); \
@@ -704,6 +837,9 @@ struct hclge_vf_vlan_cfg {
(y) = (_k_ ^ ~_v_) & (_k_); \
} while (0)
+#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
+#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+
#define HCLGE_MAC_TNL_LOG_SIZE 8
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
@@ -712,6 +848,7 @@ struct hclge_dev {
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -724,13 +861,14 @@ struct hclge_dev {
struct hclge_rst_stats rst_stats;
struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
- u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
u16 num_req_vfs; /* Num VFs requested for this PF */
u16 base_tqp_pid; /* Base task tqp physical id of this PF */
u16 alloc_rss_size; /* Allocated RSS task queue */
- u16 rss_size_max; /* HW defined max RSS task queue */
+ u16 vf_rss_size_max; /* HW defined VF max RSS task queue */
+ u16 pf_rss_size_max; /* HW defined PF max RSS task queue */
+ u32 tx_spare_buf_size; /* HW defined TX spare buffer size */
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
@@ -739,7 +877,6 @@ struct hclge_dev {
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
u8 hw_tc_map;
- u8 tc_num_last_time;
enum hclge_fc_mode fc_mode_last_time;
u8 support_sfp_query;
@@ -756,20 +893,11 @@ struct hclge_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
- u16 roce_base_msix_offset;
- u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
u16 num_nic_msi; /* Num of nic vectors for this PF */
u16 num_roce_msi; /* Num of roce vectors for this PF */
- int roce_base_vector;
- u16 pending_udp_bitmap;
-
- u16 rx_itr_default;
- u16 tx_itr_default;
-
- u16 adminq_work_limit; /* Num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list reset_timer;
@@ -778,7 +906,7 @@ struct hclge_dev {
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
- struct hclge_tqp *htqp;
+ struct hclge_comm_tqp *htqp;
struct hclge_vport *vport;
struct dentry *hclge_dbgfs;
@@ -805,15 +933,20 @@ struct hclge_dev {
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+ unsigned long last_rst_scheduled;
+ unsigned long last_mbx_scheduled;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
+ bool gro_en;
u16 wanted_umv_size;
/* max available unicast mac vlan space */
@@ -822,14 +955,15 @@ struct hclge_dev {
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
- struct mutex umv_mutex; /* protect share_umv_size */
+ /* multicast mac address number used by PF and its VFs */
+ u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
- /* affinity mask and notify for misc interrupt */
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
+ struct hclge_ptp *ptp;
+ struct devlink *devlink;
+ struct hclge_comm_rss_cfg rss_cfg;
};
/* VPort level vlan tag configuration for TX direction */
@@ -842,30 +976,25 @@ struct hclge_tx_vtag_cfg {
bool insert_tag2_en; /* Whether insert outer vlan tag */
u16 default_tag1; /* The default inner vlan tag to insert */
u16 default_tag2; /* The default outer vlan tag to insert */
+ bool tag_shift_mode_en;
};
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
- u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */
- u8 strip_tag1_en; /* Whether strip inner vlan tag */
- u8 strip_tag2_en; /* Whether strip outer vlan tag */
- u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
- u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
-};
-
-struct hclge_rss_tuple_cfg {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
+ bool rx_vlan_offload_en; /* Whether enable rx vlan offload */
+ bool strip_tag1_en; /* Whether strip inner vlan tag */
+ bool strip_tag2_en; /* Whether strip outer vlan tag */
+ bool vlan1_vlan_prionly; /* Inner vlan tag up to descriptor enable */
+ bool vlan2_vlan_prionly; /* Outer vlan tag up to descriptor enable */
+ bool strip_tag1_discard_en; /* Inner vlan tag discard for BD enable */
+ bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
};
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
HCLGE_VPORT_STATE_MAX
};
@@ -877,7 +1006,9 @@ struct hclge_vlan_info {
struct hclge_port_base_vlan_config {
u16 state;
+ bool tbl_sta;
struct hclge_vlan_info vlan_info;
+ struct hclge_vlan_info old_vlan_info;
};
struct hclge_vf_info {
@@ -886,25 +1017,20 @@ struct hclge_vf_info {
u32 spoofchk;
u32 max_tx_rate;
u32 trusted;
- u16 promisc_enable;
+ u8 request_uc_en;
+ u8 request_mc_en;
+ u8 request_bc_en;
};
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
- u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
- /* User configured lookup table entries */
- u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
- int rss_algo; /* User configured hash algorithm */
- /* User configured rss tuple sets */
- struct hclge_rss_tuple_cfg rss_tuple_sets;
-
- u16 alloc_rss_size;
-
u16 qs_offset;
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ bool req_vlan_fltr_en;
+ bool cur_vlan_fltr_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
@@ -922,11 +1048,26 @@ struct hclge_vport {
u32 mps; /* Max packet size */
struct hclge_vf_info vf_info;
+ u8 overflow_promisc_flags;
+ u8 last_promisc_flags;
+
+ spinlock_t mac_list_lock; /* protect the mac addresses to be added/deleted */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
+
struct list_head vlan_list; /* Store VF vlan table */
};
+struct hclge_speed_bit_map {
+ u32 speed;
+ u32 speed_bit;
+};
+
+struct hclge_mac_speed_map {
+ u32 speed_drv; /* speed defined in driver */
+ u32 speed_fw; /* speed defined in firmware */
+};
+
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -945,58 +1086,59 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
- struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
+ struct hclge_comm_tqp *tqp =
+ container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
-static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
-{
- return !!hdev->reset_pending;
-}
-
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
-int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
-void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
+int hclge_reset_tqp(struct hnae3_handle *handle);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ char *buf, int len);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr);
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev);
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
- u16 state, u16 vlan_tag, u16 qos,
- u16 vlan_proto);
+ u16 state,
+ struct hclge_vlan_info *vlan_info);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
-void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
+int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
+int hclge_push_vf_link_status(struct hclge_vport *vport);
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
+int hclge_mac_update_stats(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 3d850f6b1e37..a7b06c63143c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -4,6 +4,22 @@
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+
+#define CREATE_TRACE_POINTS
+#include "hclge_trace.h"
+
+static u16 hclge_errno_to_resp(int errno)
+{
+ int resp = abs(errno);
+
+ /* The status in the PF-to-VF msg cmd is a u16, constrained by HW,
+ * so we need to keep the same type here.
+ * The input errno is a standard error code, so it is safe to
+ * store abs(errno) in a u16.
+ */
+ return (u16)resp;
+}
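A quick illustration of the mapping above (editorial example, not part of the patch):

	u16 resp = hclge_errno_to_resp(-EINVAL);	/* abs(-22) == 22 */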
/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
* receives a mailbox message from VF.
@@ -14,44 +30,60 @@
*/
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
- int resp_status,
- u8 *resp_data, u16 resp_data_len)
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
+ u16 resp;
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
- if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
"PF fail to gen resp to VF len %u exceeds max len %u\n",
- resp_data_len,
+ resp_msg->len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
- /* If resp_data_len is too long, set the value to max length
+ /* If resp_msg->len is too long, set the value to max length
* and return the msg to VF
*/
- resp_data_len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+ resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
+
+ resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
+ resp_pf_to_vf->msg.vf_mbx_msg_code =
+ cpu_to_le16(vf_to_pf_req->msg.code);
+ resp_pf_to_vf->msg.vf_mbx_msg_subcode =
+ cpu_to_le16(vf_to_pf_req->msg.subcode);
+ resp = hclge_errno_to_resp(resp_msg->status);
+ if (resp < SHRT_MAX) {
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
+ } else {
+ dev_warn(&hdev->pdev->dev,
+ "failed to send response to VF, response status %u is out-of-bound\n",
+ resp);
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
+ }
- resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
- resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
- resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
- resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+ if (resp_msg->len > 0)
+ memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
+ resp_msg->len);
- if (resp_data && resp_data_len > 0)
- memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
- "PF failed(=%d) to send response to VF\n", status);
+ "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
+ status, vf_to_pf_req->mbx_src_vfid,
+ vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);
return status;
}
@@ -61,24 +93,33 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
resp_pf_to_vf->dest_vfid = dest_vfid;
resp_pf_to_vf->msg_len = msg_len;
- resp_pf_to_vf->msg[0] = mbx_opcode;
+ resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);
+
+ memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
- memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
- "PF failed(=%d) to send mailbox message to VF\n",
- status);
+ "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
+ status, dest_vfid, mbx_opcode);
return status;
}
@@ -86,8 +127,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
+ __le16 msg_data;
u16 reset_type;
- u8 msg_data[2];
u8 dest_vfid;
BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
@@ -101,10 +142,10 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else
reset_type = HNAE3_VF_FUNC_RESET;
- memcpy(&msg_data[0], &reset_type, sizeof(u16));
+ msg_data = cpu_to_le16(reset_type);
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
@@ -116,7 +157,7 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
while (chain) {
chain_tmp = chain->next;
- kzfree(chain);
+ kfree_sensitive(chain);
chain = chain_tmp;
}
}
@@ -137,22 +178,31 @@ static int hclge_get_ring_chain_from_mbx(
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
int ring_num;
int i;
- ring_num = req->msg[2];
+ ring_num = req->msg.ring_num;
- if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
- HCLGE_MBX_RING_NODE_VARIABLE_NUM))
- return -ENOMEM;
+ if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
+ return -EINVAL;
+
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1U);
+ return -EINVAL;
+ }
+ }
- hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg.param[0].ring_type);
ring_chain->tqp_index =
- hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg.param[0].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- req->msg[5]);
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
cur_chain = ring_chain;
@@ -162,18 +212,15 @@ static int hclge_get_ring_chain_from_mbx(
goto err;
hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ req->msg.param[i].ring_type);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
+ [req->msg.param[i].tqp_index]);
hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ req->msg.param[i].int_gl_index);
cur_chain->next = new_chain;
cur_chain = new_chain;
@@ -189,7 +236,7 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
struct hclge_mbx_vf_to_pf_cmd *req)
{
struct hnae3_ring_chain_node ring_chain;
- int vector_id = req->msg[1];
+ int vector_id = req->msg.vector_id;
int ret;
memset(&ring_chain, 0, sizeof(ring_chain));
@@ -204,201 +251,227 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
}
-static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *req)
+static int hclge_query_ring_vector_map(struct hclge_vport *vport,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_desc *desc)
{
-#define HCLGE_MBX_BC_INDEX 1
-#define HCLGE_MBX_UC_INDEX 2
-#define HCLGE_MBX_MC_INDEX 3
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc->data;
+ struct hclge_dev *hdev = vport->back;
+ u16 tqp_type_and_id;
+ int status;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);
- bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
- bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
- bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ ring_chain->tqp_index);
+ req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
+ req->vfid = vport->vport_id;
+
+ status = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Get VF ring vector map info fail, status is %d.\n",
+ status);
+
+ return status;
+}
+
+static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hclge_respond_to_vf_msg *resp)
+{
+#define HCLGE_LIMIT_RING_NUM 1
+#define HCLGE_RING_TYPE_OFFSET 0
+#define HCLGE_TQP_INDEX_OFFSET 1
+#define HCLGE_INT_GL_INDEX_OFFSET 2
+#define HCLGE_VECTOR_ID_OFFSET 3
+#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4
+ struct hnae3_ring_chain_node ring_chain;
+ struct hclge_desc desc;
+ struct hclge_ctrl_vector_chain_cmd *data =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ u16 tqp_type_and_id;
+ u8 int_gl_index;
int ret;
- if (!vport->vf_info.trusted) {
- en_uc = false;
- en_mc = false;
+ req->msg.ring_num = HCLGE_LIMIT_RING_NUM;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
+ if (ret) {
+ hclge_free_vector_ring_chain(&ring_chain);
+ return ret;
}
- ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
- if (req->mbx_need_resp)
- hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);
+ tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
+ int_gl_index = hnae3_get_field(tqp_type_and_id,
+ HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);
- vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
+ resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
+ resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
+ resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
+ resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
+ resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;
+
+ hclge_free_vector_ring_chain(&ring_chain);
return ret;
}
-void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
+static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
{
- u8 dest_vfid = (u8)vport->vport_id;
- u8 msg_data[2];
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
- memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
+ vport->vf_info.request_uc_en = req->msg.en_uc;
+ vport->vf_info.request_mc_en = req->msg.en_mc;
+ vport->vf_info.request_bc_en = req->msg.en_bc;
+
+ if (req->msg.en_limit_promisc)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
+ else
+ clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
+ &handle->priv_flags);
- hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
- HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6
+
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
int status;
- if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
- const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)
+ (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);
/* If VF MAC has been configured by the host then it
* cannot be overridden by the MAC specified by the VM.
*/
if (!is_zero_ether_addr(vport->vf_info.mac) &&
- !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
- status = -EPERM;
- goto out;
- }
-
- if (!is_valid_ether_addr(mac_addr)) {
- status = -EINVAL;
- goto out;
- }
-
- hclge_rm_uc_addr_common(vport, old_addr);
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (status) {
- hclge_add_uc_addr_common(vport, old_addr);
- } else {
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
- }
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
- status = hclge_rm_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
+ !ether_addr_equal(mac_addr, vport->vf_info.mac))
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EINVAL;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
+ mac_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+ hclge_task_schedule(hdev, 0);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_UC, mac_addr);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_UC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %u\n",
- mbx_req->msg[1]);
+ mbx_req->msg.subcode);
return -EIO;
}
-out:
- if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
- hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
-
- return 0;
+ return status;
}
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
- u8 resp_len = 0;
- u8 resp_data;
- int status;
- if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
- status = hclge_add_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_MC);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
- status = hclge_rm_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_MC);
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_MC, mac_addr);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_MC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %u\n",
- mbx_req->msg[1]);
+ mbx_req->msg.subcode);
return -EIO;
}
- if (gen_resp)
- hclge_gen_resp_to_vf(vport, mbx_req, status,
- &resp_data, resp_len);
-
return 0;
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
- u16 state, u16 vlan_tag, u16 qos,
- u16 vlan_proto)
+ u16 state,
+ struct hclge_vlan_info *vlan_info)
{
-#define MSG_DATA_SIZE 8
+ struct hclge_mbx_port_base_vlan base_vlan;
- u8 msg_data[MSG_DATA_SIZE];
+ base_vlan.state = cpu_to_le16(state);
+ base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto);
+ base_vlan.qos = cpu_to_le16(vlan_info->qos);
+ base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag);
- memcpy(&msg_data[0], &state, sizeof(u16));
- memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
- memcpy(&msg_data[4], &qos, sizeof(u16));
- memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
-
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan),
HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_MBX_VLAN_STATE_OFFSET 0
+#define HCLGE_MBX_VLAN_INFO_OFFSET 2
+
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
struct hclge_vf_vlan_cfg *msg_cmd;
- int status = 0;
-
- msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
- if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
- struct hnae3_handle *handle = &vport->nic;
- u16 vlan, proto;
- bool is_kill;
-
- is_kill = !!msg_cmd->is_kill;
- vlan = msg_cmd->vlan;
- proto = msg_cmd->proto;
- status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
- vlan, is_kill);
- if (mbx_req->mbx_need_resp)
- return hclge_gen_resp_to_vf(vport, mbx_req, status,
- NULL, 0);
- } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
- struct hnae3_handle *handle = &vport->nic;
- bool en = msg_cmd->is_kill ? true : false;
-
- status = hclge_en_hw_strip_rxvtag(handle, en);
- } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
- struct hclge_vlan_info *vlan_info;
- u16 *state;
-
- state = (u16 *)&mbx_req->msg[2];
- vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
- status = hclge_update_port_base_vlan_cfg(vport, *state,
- vlan_info);
- } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
- u8 state;
-
- state = vport->port_base_vlan_cfg.state;
- status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
- sizeof(u8));
+ __be16 proto;
+ u16 vlan_id;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ switch (msg_cmd->subcode) {
+ case HCLGE_MBX_VLAN_FILTER:
+ proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
+ vlan_id = le16_to_cpu(msg_cmd->vlan);
+ return hclge_set_vlan_filter(handle, proto, vlan_id,
+ msg_cmd->is_kill);
+ case HCLGE_MBX_VLAN_RX_OFF_CFG:
+ return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
+ case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
+		/* the VF does not need to know the port based VLAN state on
+		 * HNAE3_DEVICE_VERSION_V3 devices, so always report the state
+		 * as disabled when such a VF queries it.
+		 */
+ resp_msg->data[0] =
+ hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
+ HNAE3_PORT_BASE_VLAN_DISABLE :
+ vport->port_base_vlan_cfg.state;
+ resp_msg->len = sizeof(u8);
+ return 0;
+ case HCLGE_MBX_ENABLE_VLAN_FILTER:
+ return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
+ default:
+ return 0;
}
-
- return status;
}
static int hclge_set_vf_alive(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- bool alive = !!mbx_req->msg[2];
+ bool alive = !!mbx_req->msg.data[0];
int ret = 0;
if (alive)
@@ -409,86 +482,90 @@ static int hclge_set_vf_alive(struct hclge_vport *vport,
return ret;
}
-static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_basic_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u8 vf_tc_map = 0;
+ struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
+ struct hclge_basic_info *basic_info;
unsigned int i;
- int ret;
+ u32 pf_caps;
- for (i = 0; i < kinfo->num_tc; i++)
- vf_tc_map |= BIT(i);
+ basic_info = (struct hclge_basic_info *)resp_msg->data;
+ for (i = 0; i < kinfo->tc_info.num_tc; i++)
+ basic_info->hw_tc_map |= BIT(i);
- ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
- sizeof(vf_tc_map));
+ pf_caps = le32_to_cpu(basic_info->pf_caps);
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
- return ret;
+ basic_info->pf_caps = cpu_to_le32(pf_caps);
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
-static int hclge_get_vf_queue_info(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN 6
- u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+
+ struct hclge_mbx_vf_queue_info *queue_info;
struct hclge_dev *hdev = vport->back;
/* get the queue related info */
- memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
- memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
- memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16));
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_TQPS_RSS_INFO_LEN);
+ queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data;
+ queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps);
+ queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size);
+ queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len);
+ resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}
-static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
- ETH_ALEN);
+ ether_addr_copy(resp_msg->data, vport->vf_info.mac);
+ resp_msg->len = ETH_ALEN;
}
-static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN 4
- u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN];
+
+ struct hclge_mbx_vf_queue_depth *queue_depth;
struct hclge_dev *hdev = vport->back;
/* get the queue depth info */
- memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16));
- memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16));
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_TQPS_DEPTH_INFO_LEN);
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
+ queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
+ queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);
+
+ resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}
-static int hclge_get_vf_media_type(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_VF_MEDIA_TYPE_OFFSET 0
+#define HCLGE_VF_MODULE_TYPE_OFFSET 1
+#define HCLGE_VF_MEDIA_TYPE_LENGTH 2
+
struct hclge_dev *hdev = vport->back;
- u8 resp_data[2];
- resp_data[0] = hdev->hw.mac.media_type;
- resp_data[1] = hdev->hw.mac.module_type;
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- sizeof(resp_data));
+ resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
+ hdev->hw.mac.media_type;
+ resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
+ hdev->hw.mac.module_type;
+ resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}
-static int hclge_get_link_info(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP 1U
#define HCLGE_VF_LINK_STATE_DOWN 0U
+ struct hclge_mbx_link_status link_info;
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[8];
- u8 dest_vfid;
- u16 duplex;
/* mac.link can only be 0 or 1 */
switch (vport->vf_info.link_state) {
@@ -504,68 +581,78 @@ static int hclge_get_link_info(struct hclge_vport *vport,
break;
}
- duplex = hdev->hw.mac.duplex;
- memcpy(&msg_data[0], &link_status, sizeof(u16));
- memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
- memcpy(&msg_data[6], &duplex, sizeof(u16));
- dest_vfid = mbx_req->mbx_src_vfid;
+ link_info.link_status = cpu_to_le16(link_status);
+ link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
+ link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
+ link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
- HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+ return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
+ HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}
static void hclge_get_link_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED 1
+ struct hclge_mbx_link_mode link_mode;
struct hclge_dev *hdev = vport->back;
unsigned long advertising;
unsigned long supported;
unsigned long send_data;
- u8 msg_data[10];
u8 dest_vfid;
advertising = hdev->hw.mac.advertising[0];
supported = hdev->hw.mac.supported[0];
dest_vfid = mbx_req->mbx_src_vfid;
- msg_data[0] = mbx_req->msg[2];
-
- send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
+ send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
+ advertising;
+ link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
+ link_mode.link_mode = cpu_to_le64(send_data);
- memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
- hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}
-static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_RESET_ALL_QUEUE_DONE 1U
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
u16 queue_id;
+ int ret;
+
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
+ resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
+ resp_msg->len = sizeof(u8);
- memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+	/* the PF resets all of a VF's queues at once, so there is no need to
+	 * reset anything when queue_id > 0; just return success.
+ */
+ if (queue_id > 0)
+ return 0;
- hclge_reset_vf_queue(vport, queue_id);
+ ret = hclge_reset_tqp(handle);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);
- /* send response msg to VF after queue reset complete */
- hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
+ return ret;
}
-static void hclge_reset_vf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static int hclge_reset_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
- vport->vport_id);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
- ret = hclge_func_reset_cmd(hdev, vport->vport_id);
- hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+ return hclge_func_reset_cmd(hdev, vport->vport_id);
}
-static void hclge_vf_keep_alive(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
vport->last_active_jiffies = jiffies;
}
@@ -573,45 +660,64 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport,
static int hclge_set_vf_mtu(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- int ret;
+ struct hclge_mbx_mtu_info *mtu_info;
u32 mtu;
- memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
- ret = hclge_set_vport_mtu(vport, mtu);
+ mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data;
+ mtu = le32_to_cpu(mtu_info->mtu);
- return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+ return hclge_set_vport_mtu(vport, mtu);
}
static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
u16 queue_id, qid_in_pf;
- u8 resp_data[2];
- memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
- qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
- memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+ queue_id, mbx_req->mbx_src_vfid);
+ return -EINVAL;
+ }
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- sizeof(resp_data));
+ qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
+ resp_msg->len = sizeof(qid_in_pf);
+ return 0;
}
static int hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
- u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
struct hclge_dev *hdev = vport->back;
+ struct hclge_comm_rss_cfg *rss_cfg;
u8 index;
- index = mbx_req->msg[2];
+ index = mbx_req->msg.data[0];
+ rss_cfg = &hdev->rss_cfg;
+
+	/* Check the rss_hash_key query index from the VF to make sure it does
+	 * not exceed the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(rss_cfg->rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+			 "failed to get the rss hash key, index (%u) is invalid\n",
+ index);
+ return -EINVAL;
+ }
- memcpy(&resp_data[0],
- &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+ memcpy(resp_msg->data,
+ &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_RSS_MBX_RESP_LEN);
+ resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -634,20 +740,17 @@ static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
static void hclge_handle_link_change_event(struct hclge_dev *hdev,
struct hclge_mbx_vf_to_pf_cmd *req)
{
-#define LINK_STATUS_OFFSET 1
-#define LINK_FAIL_CODE_OFFSET 2
-
hclge_task_schedule(hdev, 0);
- if (!req->msg[LINK_STATUS_OFFSET])
- hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
+ if (!req->msg.subcode)
+ hclge_link_fail_parse(hdev, req->msg.data[0]);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
+ u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->cmq.crq.next_to_use;
+ return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
@@ -659,18 +762,305 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
ae_dev->ops->reset_event(hdev->pdev, NULL);
}
+static void hclge_handle_vf_tbl(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vf_vlan_cfg *msg_cmd;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, true);
+ } else {
+ dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
+ msg_cmd->subcode);
+ }
+}
+
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
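+/* dispatch table mapping each VF mailbox opcode to its handler; opcodes with
+ * no entry are rejected as unsupported in hclge_mbx_request_handling().
+ */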
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+	/* PF should not reply to messages that originate from the IMP
+	 * firmware (opcodes at or above HCLGE_MBX_GET_VF_FLR_STATUS)
+	 */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
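+		/* warn if the response is being sent later than the mailbox
+		 * scheduling timeout, since the VF may have given up waiting
+		 */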
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
- struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
+ struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclge_vport *vport;
+ struct hclge_mbx_ops_param param;
struct hclge_desc *desc;
unsigned int flag;
- int ret;
+ param.resp_msg = &resp_msg;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_warn(&hdev->pdev->dev,
"command queue needs re-initializing\n");
return;
@@ -683,7 +1073,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
- req->msg[0]);
+ req->msg.code);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
@@ -691,151 +1081,19 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
continue;
}
- vport = &hdev->vport[req->mbx_src_vfid];
-
- switch (req->msg[0]) {
- case HCLGE_MBX_MAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
- req);
- break;
- case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
- req);
- break;
- case HCLGE_MBX_SET_PROMISC_MODE:
- ret = hclge_set_vf_promisc_mode(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF promisc mode\n",
- ret);
- break;
- case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF UC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req, false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF MC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to config VF's VLAN\n",
- ret);
- break;
- case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req, false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to set VF's ALIVE\n",
- ret);
- break;
- case HCLGE_MBX_GET_QINFO:
- ret = hclge_get_vf_queue_info(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get Q info for VF\n",
- ret);
- break;
- case HCLGE_MBX_GET_QDEPTH:
- ret = hclge_get_vf_queue_depth(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get Q depth for VF\n",
- ret);
- break;
-
- case HCLGE_MBX_GET_TCINFO:
- ret = hclge_get_vf_tcinfo(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get TC info for VF\n",
- ret);
- break;
- case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_get_link_info(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get link stat for VF\n",
- ret);
- break;
- case HCLGE_MBX_QUEUE_RESET:
- hclge_mbx_reset_vf_queue(vport, req);
- break;
- case HCLGE_MBX_RESET:
- hclge_reset_vf(vport, req);
- break;
- case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport, req);
- break;
- case HCLGE_MBX_SET_MTU:
- ret = hclge_set_vf_mtu(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF fail(%d) to set mtu\n", ret);
- break;
- case HCLGE_MBX_GET_QID_IN_PF:
- ret = hclge_get_queue_id_in_pf(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get qid for VF\n",
- ret);
- break;
- case HCLGE_MBX_GET_RSS_KEY:
- ret = hclge_get_rss_key(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get rss key for VF\n",
- ret);
- break;
- case HCLGE_MBX_GET_LINK_MODE:
- hclge_get_link_mode(vport, req);
- break;
- case HCLGE_MBX_GET_VF_FLR_STATUS:
- case HCLGE_MBX_VF_UNINIT:
- hclge_rm_vport_all_mac_table(vport, true,
- HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, true,
- HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, true);
- break;
- case HCLGE_MBX_GET_MEDIA_TYPE:
- ret = hclge_get_vf_media_type(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to media type for VF\n",
- ret);
- break;
- case HCLGE_MBX_PUSH_LINK_STATUS:
- hclge_handle_link_change_event(hdev, req);
- break;
- case HCLGE_MBX_GET_MAC_ADDR:
- ret = hclge_get_vf_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get MAC for VF\n",
- ret);
- break;
- case HCLGE_MBX_NCSI_ERROR:
- hclge_handle_ncsi_error(hdev);
- break;
- default:
- dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %u\n",
- req->msg[0]);
- break;
- }
+ trace_hclge_pf_mbx_get(hdev, req);
+
+ /* clear the resp_msg before processing every mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
+
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 696c5ae922e3..85fb11de43a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -47,8 +47,8 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
- return 0;
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return -EBUSY;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -85,8 +85,8 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
- return 0;
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return -EBUSY;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
@@ -155,7 +155,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
- "Failed to register MDIO bus ret = %#x\n", ret);
+ "failed to register MDIO bus, ret = %d\n", ret);
return ret;
}
@@ -187,7 +187,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
@@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
+ phy_loopback(phydev, false);
+
phy_start(phydev);
}
@@ -268,3 +270,42 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}
+
+u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+{
+ struct hclge_phy_reg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true);
+
+ req = (struct hclge_phy_reg_cmd *)desc.data;
+ req->reg_addr = cpu_to_le16(reg_addr);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to read phy reg, ret = %d.\n", ret);
+
+ return le16_to_cpu(req->reg_val);
+}
+
+int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
+{
+ struct hclge_phy_reg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false);
+
+ req = (struct hclge_phy_reg_cmd *)desc.data;
+ req->reg_addr = cpu_to_le16(reg_addr);
+ req->reg_val = cpu_to_le16(val);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to write phy reg, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index dd9a1218a7b0..4200d0b6d931 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -4,10 +4,16 @@
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
+#include "hnae3.h"
+
+struct hclge_dev;
+
int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
+u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
new file mode 100644
index 000000000000..a40b1583f114
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021 Hisilicon Limited.
+
+#include <linux/skbuff.h>
+#include "hclge_main.h"
+#include "hnae3.h"
+
+static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
+ HCLGE_PTP_CYCLE_QUO_MASK;
+ ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+
+ if (ptp->cycle.den == 0) {
+ dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
+ u64 adj_val, adj_base, diff;
+ unsigned long flags;
+ bool is_neg = false;
+ u32 quo, numerator;
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ is_neg = true;
+ }
+
+ adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
+ adj_val = adj_base * ppb;
+ diff = div_u64(adj_val, 1000000000ULL);
+
+ if (is_neg)
+ adj_val = adj_base - diff;
+ else
+ adj_val = adj_base + diff;
+
+	/* The clock cycle is defined by three parts: quotient, numerator and
+	 * denominator. For example, for a 2.5 ns cycle the quotient is 2, the
+	 * denominator is fixed to ptp->cycle.den, and the numerator is
+	 * 0.5 * ptp->cycle.den.
+ */
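+	/* Illustrative example (values assumed, not taken from hardware):
+	 * with quo = 2, numer = 2, den = 4 the base cycle is 2.5 ns and
+	 * adj_base = 2 * 4 + 2 = 10; ppb = 100000000 (+10%) gives
+	 * diff = 10 * 100000000 / 1000000000 = 1, so adj_val = 11, which
+	 * splits into quo = 2 and numerator = 3, i.e. a 2.75 ns cycle.
+	 */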
+ quo = div_u64_rem(adj_val, cycle->den, &numerator);
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+ writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+ writel(HCLGE_PTP_CYCLE_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_ptp *ptp = hdev->ptp;
+
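+
+	/* only one TX timestamp request is handled at a time; if a previous
+	 * one is still outstanding, skip this packet and count it as skipped
+	 */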
+ if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
+ test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
+ ptp->tx_skipped++;
+ return false;
+ }
+
+ ptp->tx_start = jiffies;
+ ptp->tx_skb = skb_get(skb);
+ ptp->tx_cnt++;
+
+ return true;
+}
+
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev)
+{
+ struct sk_buff *skb = hdev->ptp->tx_skb;
+ struct skb_shared_hwtstamps hwts;
+ u32 hi, lo;
+ u64 ns;
+
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) &
+ HCLGE_PTP_TX_TS_NSEC_MASK;
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) &
+ HCLGE_PTP_TX_TS_SEC_H_MASK;
+ hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base +
+ HCLGE_PTP_TX_TS_SEQID_REG);
+
+ if (skb) {
+ hdev->ptp->tx_skb = NULL;
+ hdev->ptp->tx_cleaned++;
+
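+		/* seconds are split across two registers; combine the 48-bit
+		 * value (hi:lo) and add it to the nanosecond part
+		 */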
+ ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC;
+ hwts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &hwts);
+ dev_kfree_skb_any(skb);
+ }
+
+ clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state);
+}
+
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ unsigned long flags;
+ u64 ns = nsec;
+ u32 sec_h;
+
+ if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ return;
+
+	/* The BD does not have enough space for the upper 16 bits of the
+	 * seconds value, and that part rarely changes, so read it from the
+	 * register instead.
+ */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC;
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ hdev->ptp->last_rx = jiffies;
+ hdev->ptp->rx_cnt++;
+}
+
+static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ u32 hi, lo;
+ u64 ns;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hclge_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG);
+ writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG);
+ /* synchronize the time of phc */
+ writel(HCLGE_PTP_TIME_SYNC_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ bool is_neg = false;
+ u32 adj_val = 0;
+
+ if (delta < 0) {
+ adj_val |= HCLGE_PTP_TIME_NSEC_NEG;
+ delta = -delta;
+ is_neg = true;
+ }
+
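+	/* the nanosecond-adjust register only holds values up to
+	 * HCLGE_PTP_TIME_NSEC_MASK; larger deltas are applied by reading the
+	 * current time and writing the adjusted value back via settime
+	 */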
+ if (delta > HCLGE_PTP_TIME_NSEC_MASK) {
+ struct timespec64 ts;
+ s64 ns;
+
+ hclge_ptp_gettimex(ptp, &ts, NULL);
+ ns = timespec64_to_ns(&ts);
+ ns = is_neg ? ns - delta : ns + delta;
+ ts = ns_to_timespec64(ns);
+ return hclge_ptp_settime(ptp, &ts);
+ }
+
+ adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(HCLGE_PTP_TIME_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_ptp_int_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_int_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false);
+ req->int_en = en ? 1 : 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to %s ptp interrupt, ret = %d\n",
+ en ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query ptp config, ret = %d\n", ret);
+ return ret;
+ }
+
+ *cfg = le32_to_cpu(req->cfg);
+
+ return 0;
+}
+
+static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false);
+ req->cfg = cpu_to_le32(cfg);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to config ptp, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ switch (cfg->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ clear_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ break;
+ case HWTSTAMP_TX_ON:
+ set_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_TX_EN_B;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ int rx_filter = cfg->rx_filter;
+
+ switch (cfg->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clear_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ cfg->rx_filter = rx_filter;
+
+ return 0;
+}
+
+static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
+ struct hwtstamp_config *cfg)
+{
+ unsigned long flags = hdev->ptp->flags;
+ u32 ptp_cfg = 0;
+ int ret;
+
+ if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags))
+ ptp_cfg |= HCLGE_PTP_EN_B;
+
+ ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_cfg(hdev, ptp_cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->flags = flags;
+ hdev->ptp->ptp_cfg = ptp_cfg;
+
+ return 0;
+}
+
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ int ret;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ ret = hclge_ptp_set_ts_mode(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->ts_cfg = cfg;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (hdev->ptp->clock)
+ info->phc_index = ptp_clock_index(hdev->ptp->clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
+}
+
+static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp;
+
+ ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp)
+ return -ENOMEM;
+
+ ptp->hdev = hdev;
+ snprintf(ptp->info.name, sizeof(ptp->info.name), "%s",
+ HCLGE_DRIVER_NAME);
+ ptp->info.owner = THIS_MODULE;
+ ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
+ ptp->info.n_ext_ts = 0;
+ ptp->info.pps = 0;
+ ptp->info.adjfreq = hclge_ptp_adjfreq;
+ ptp->info.adjtime = hclge_ptp_adjtime;
+ ptp->info.gettimex64 = hclge_ptp_gettimex;
+ ptp->info.settime64 = hclge_ptp_settime;
+
+ ptp->info.n_alarm = 0;
+ ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&hdev->pdev->dev,
+ "%d failed to register ptp clock, ret = %ld\n",
+ ptp->info.n_alarm, PTR_ERR(ptp->clock));
+ return -ENODEV;
+ } else if (!ptp->clock) {
+ dev_err(&hdev->pdev->dev, "failed to register ptp clock\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
+ return 0;
+}
+
+static void hclge_ptp_destroy_clock(struct hclge_dev *hdev)
+{
+ ptp_clock_unregister(hdev->ptp->clock);
+ hdev->ptp->clock = NULL;
+ devm_kfree(&hdev->pdev->dev, hdev->ptp);
+ hdev->ptp = NULL;
+}
+
+int hclge_ptp_init(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct timespec64 ts;
+ int ret;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))
+ return 0;
+
+ if (!hdev->ptp) {
+ ret = hclge_ptp_create_clock(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_get_cycle(hdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_ptp_int_en(hdev, true);
+ if (ret)
+ goto out;
+
+ set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init freq, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts mode, ret = %d\n", ret);
+ goto out;
+ }
+
+ ktime_get_real_ts64(&ts);
+ ret = hclge_ptp_settime(&hdev->ptp->info, &ts);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts time, ret = %d\n", ret);
+ goto out;
+ }
+
+ set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+	dev_info(&hdev->pdev->dev, "phc initialized successfully\n");
+
+ return 0;
+
+out:
+ hclge_ptp_destroy_clock(hdev);
+
+ return ret;
+}
+
+void hclge_ptp_uninit(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ if (!ptp)
+ return;
+
+ hclge_ptp_int_en(hdev, false);
+ clear_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+ clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags);
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg))
+ dev_err(&hdev->pdev->dev, "failed to disable phc\n");
+
+ if (ptp->tx_skb) {
+ struct sk_buff *skb = ptp->tx_skb;
+
+ ptp->tx_skb = NULL;
+ dev_kfree_skb_any(skb);
+ }
+
+ hclge_ptp_destroy_clock(hdev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
new file mode 100644
index 000000000000..bbee74cd8404
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021 Hisilicon Limited.
+
+#ifndef __HCLGE_PTP_H
+#define __HCLGE_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/types.h>
+
+struct hclge_dev;
+struct ifreq;
+
+#define HCLGE_PTP_REG_OFFSET 0x29000
+
+#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
+#define HCLGE_PTP_TX_TS_NSEC_REG 0x4
+#define HCLGE_PTP_TX_TS_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TX_TS_SEC_L_REG 0x8
+#define HCLGE_PTP_TX_TS_SEC_H_REG 0xC
+#define HCLGE_PTP_TX_TS_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TX_TS_CNT_REG 0x30
+
+#define HCLGE_PTP_TIME_SEC_H_REG 0x50
+#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TIME_SEC_L_REG 0x54
+#define HCLGE_PTP_TIME_NSEC_REG 0x58
+#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
+#define HCLGE_PTP_TIME_SYNC_REG 0x5C
+#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
+#define HCLGE_PTP_TIME_ADJ_REG 0x60
+#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
+#define HCLGE_PTP_CYCLE_QUO_REG 0x64
+#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0)
+#define HCLGE_PTP_CYCLE_DEN_REG 0x68
+#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
+#define HCLGE_PTP_CYCLE_CFG_REG 0x70
+#define HCLGE_PTP_CYCLE_ADJ_EN BIT(0)
+#define HCLGE_PTP_CUR_TIME_SEC_H_REG 0x74
+#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
+#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
+
+#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
+#define HCLGE_PTP_SEC_H_OFFSET 32u
+#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
+
+#define HCLGE_PTP_FLAG_EN 0
+#define HCLGE_PTP_FLAG_TX_EN 1
+#define HCLGE_PTP_FLAG_RX_EN 2
+
+struct hclge_ptp_cycle {
+ u32 quo;
+ u32 numer;
+ u32 den;
+};
+
+struct hclge_ptp {
+ struct hclge_dev *hdev;
+ struct ptp_clock *clock;
+ struct sk_buff *tx_skb;
+ unsigned long flags;
+ void __iomem *io_base;
+ struct ptp_clock_info info;
+ struct hwtstamp_config ts_cfg;
+ spinlock_t lock; /* protects ptp registers */
+ u32 ptp_cfg;
+ u32 last_tx_seqid;
+ struct hclge_ptp_cycle cycle;
+ unsigned long tx_start;
+ unsigned long tx_cnt;
+ unsigned long tx_skipped;
+ unsigned long tx_cleaned;
+ unsigned long last_rx;
+ unsigned long rx_cnt;
+ unsigned long tx_timeout;
+};
+
+struct hclge_ptp_int_cmd {
+#define HCLGE_PTP_INT_EN_B BIT(0)
+
+ u8 int_en;
+ u8 rsvd[23];
+};
+
+enum hclge_ptp_udp_type {
+ HCLGE_PTP_UDP_NOT_TYPE,
+ HCLGE_PTP_UDP_P13F_TYPE,
+ HCLGE_PTP_UDP_P140_TYPE,
+ HCLGE_PTP_UDP_FULL_TYPE,
+};
+
+enum hclge_ptp_msg_type {
+ HCLGE_PTP_MSG_TYPE_V2_L2,
+ HCLGE_PTP_MSG_TYPE_V2,
+ HCLGE_PTP_MSG_TYPE_V2_EVENT,
+};
+
+enum hclge_ptp_msg0_type {
+ HCLGE_PTP_MSG0_V2_DELAY_REQ = 1,
+ HCLGE_PTP_MSG0_V2_PDELAY_REQ,
+ HCLGE_PTP_MSG0_V2_DELAY_RESP,
+ HCLGE_PTP_MSG0_V2_EVENT = 0xF,
+};
+
+#define HCLGE_PTP_MSG1_V2_DEFAULT 1
+
+struct hclge_ptp_cfg_cmd {
+#define HCLGE_PTP_EN_B BIT(0)
+#define HCLGE_PTP_TX_EN_B BIT(1)
+#define HCLGE_PTP_RX_EN_B BIT(2)
+#define HCLGE_PTP_UDP_EN_SHIFT 3
+#define HCLGE_PTP_UDP_EN_MASK GENMASK(4, 3)
+#define HCLGE_PTP_MSG_TYPE_SHIFT 8
+#define HCLGE_PTP_MSG_TYPE_MASK GENMASK(9, 8)
+#define HCLGE_PTP_MSG1_SHIFT 16
+#define HCLGE_PTP_MSG1_MASK GENMASK(19, 16)
+#define HCLGE_PTP_MSG0_SHIFT 24
+#define HCLGE_PTP_MSG0_MASK GENMASK(27, 24)
+
+ __le32 cfg;
+ u8 rsvd[20];
+};
+
+static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info)
+{
+ struct hclge_ptp *ptp = container_of(info, struct hclge_ptp, info);
+
+ return ptp->hdev;
+}
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev);
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec);
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_init(struct hclge_dev *hdev);
+void hclge_ptp_uninit(struct hclge_dev *hdev);
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info);
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
+#endif
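
The current-time registers above split a 48-bit second counter across a high/low register pair plus a 30-bit nanosecond register. A minimal sketch of how those pieces could be reassembled is shown below; the helper names are hypothetical and it is assumed ptp->io_base already maps the PTP register block.

```c
/* Sketch only: reassemble the split second/nanosecond registers defined above.
 * Helper names are hypothetical; ptp->io_base is assumed to map the PTP block.
 */
#include <linux/io.h>
#include <linux/time64.h>

static u32 hclge_ptp_example_read(struct hclge_ptp *ptp, u32 reg)
{
	return readl(ptp->io_base + reg);
}

static void hclge_ptp_example_get_time(struct hclge_ptp *ptp,
				       struct timespec64 *ts)
{
	u64 sec_h, sec_l;

	/* 48-bit seconds: high 16 bits and low 32 bits live in separate regs */
	sec_h = hclge_ptp_example_read(ptp, HCLGE_PTP_CUR_TIME_SEC_H_REG) &
		HCLGE_PTP_TIME_SEC_H_MASK;
	sec_l = hclge_ptp_example_read(ptp, HCLGE_PTP_CUR_TIME_SEC_L_REG);

	ts->tv_sec = (sec_h << HCLGE_PTP_SEC_H_OFFSET) | sec_l;
	ts->tv_nsec = hclge_ptp_example_read(ptp, HCLGE_PTP_CUR_TIME_NSEC_REG) &
		      HCLGE_PTP_TIME_NSEC_MASK;
}
```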
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 28db13253a5e..4a33f65190e2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,14 +23,11 @@ enum hclge_shaper_level {
#define HCLGE_SHAPER_BS_U_DEF 5
#define HCLGE_SHAPER_BS_S_DEF 20
-#define HCLGE_ETHER_MAX_RATE 100000
-
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
* @ir: Rate to be config, its unit is Mbps
* @shaper_level: the shaper level. eg: port, pg, priority, queueset
- * @ir_b: IR_B parameter of IR shaper
- * @ir_u: IR_U parameter of IR shaper
- * @ir_s: IR_S parameter of IR shaper
+ * @ir_para: parameters of IR shaper
+ * @max_tm_rate: the max tm rate available to config
*
* the formula:
*
@@ -41,10 +38,12 @@ enum hclge_shaper_level {
 * @return: 0: calculate successful, negative: fail
*/
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
- u8 *ir_b, u8 *ir_u, u8 *ir_s)
+ struct hclge_shaper_ir_para *ir_para,
+ u32 max_tm_rate)
{
+#define DEFAULT_SHAPER_IR_B 126
#define DIVISOR_CLK (1000 * 8)
-#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+#define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
 6 * 256, /* Priority level */
@@ -59,7 +58,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Calc tick */
if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
- ir > HCLGE_ETHER_MAX_RATE)
+ ir > max_tm_rate)
return -EINVAL;
tick = tick_array[shaper_level];
@@ -71,91 +70,93 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000
* tick * 1
*/
- ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
+ ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
- *ir_b = 126;
- *ir_u = 0;
- *ir_s = 0;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
+ ir_para->ir_u = 0;
+ ir_para->ir_s = 0;
return 0;
} else if (ir_calc > ir) {
/* Increasing the denominator to select ir_s value */
while (ir_calc >= ir && ir) {
ir_s_calc++;
- ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
+ ir_calc = DEFAULT_DIVISOR_IR_B /
+ (tick * (1 << ir_s_calc));
}
- *ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
- DIVISOR_CLK;
+ ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+ (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else {
/* Increasing the numerator to select ir_u value */
u32 numerator;
while (ir_calc < ir) {
ir_u_calc++;
- numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
+ numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick;
}
if (ir_calc == ir) {
- *ir_b = 126;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
} else {
u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
- *ir_b = (ir * tick + (denominator >> 1)) / denominator;
+ ir_para->ir_b = (ir * tick + (denominator >> 1)) /
+ denominator;
}
}
- *ir_u = ir_u_calc;
- *ir_s = ir_s_calc;
+ ir_para->ir_u = ir_u_calc;
+ ir_para->ir_s = ir_s_calc;
return 0;
}
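
For reference, a minimal sketch of how a caller uses the reworked interface: the three separate ir_b/ir_u/ir_s output pointers are replaced by the hclge_shaper_ir_para struct, and the rate ceiling now comes from dev_specs instead of the removed HCLGE_ETHER_MAX_RATE constant. The 50000 Mbps rate is purely illustrative and an hdev in scope is assumed.

```c
/* Illustrative caller of the reworked calculation (rate value is arbitrary). */
struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;

ret = hclge_shaper_para_calc(50000 /* Mbps */, HCLGE_SHAPER_LVL_PRI, &ir_para,
			     hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
	return ret;

shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
					 ir_para.ir_s, HCLGE_SHAPER_BS_U_DEF,
					 HCLGE_SHAPER_BS_S_DEF);
```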
-static int hclge_pfc_stats_get(struct hclge_dev *hdev,
- enum hclge_opcode_type opcode, u64 *stats)
-{
- struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
- int ret, i, j;
-
- if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
- opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
- return -EINVAL;
-
- for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
- hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- }
-
- hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
+static const u16 hclge_pfc_tx_stats_offset[] = {
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
+};
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
- if (ret)
- return ret;
+static const u16 hclge_pfc_rx_stats_offset[] = {
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
+};
- for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
- struct hclge_pfc_stats_cmd *pfc_stats =
- (struct hclge_pfc_stats_cmd *)desc[i].data;
+static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
+{
+ const u16 *offset;
+ int i;
- for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
- u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;
+ if (tx)
+ offset = hclge_pfc_tx_stats_offset;
+ else
+ offset = hclge_pfc_rx_stats_offset;
- if (index < HCLGE_MAX_TC_NUM)
- stats[index] =
- le64_to_cpu(pfc_stats->pkt_num[j]);
- }
- }
- return 0;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}
-int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
+void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
- return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
+ hclge_pfc_stats_get(hdev, false, stats);
}
-int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
+void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
- return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
+ hclge_pfc_stats_get(hdev, true, stats);
}
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
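
The command-based PFC counters are replaced above by reads from the cached MAC statistics through per-priority offset tables. HCLGE_MAC_STATS_FIELD_OFF and HCLGE_STATS_READ are defined elsewhere in the driver headers; the sketch below only illustrates the offsetof-based pattern they are assumed to follow, with EXAMPLE_ names to avoid claiming the real definitions.

```c
/* Sketch of the assumed offset-table pattern (EXAMPLE_ names are placeholders
 * for the real macros in the driver headers; hdev->mac_stats is assumed to be
 * a plain struct of u64 counters).
 */
#include <linux/stddef.h>

#define EXAMPLE_MAC_STATS_FIELD_OFF(f)	(offsetof(struct hclge_mac_stats, f))
#define EXAMPLE_STATS_READ(p, offset)	(*(u64 *)((u8 *)(p) + (offset)))

/* one per-priority counter fetched straight from the cached stats,
 * no firmware command needed:
 */
u64 pri0_tx_pfc = EXAMPLE_STATS_READ(&hdev->mac_stats,
				     EXAMPLE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num));
```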
@@ -247,7 +248,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
return 0;
}
-static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
struct hclge_desc desc;
u8 *pri = (u8 *)desc.data;
@@ -265,6 +266,47 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+ /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ /* Each dscp setting has 4 bits, so each byte stores two dscp
+ * settings
+ */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
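
As the comment notes, each DSCP's TC id is a 4-bit nibble, two per byte, with the low 32 DSCP values in the first descriptor and the high 32 in the second. A standalone sketch of the same packing is below; the helper name is hypothetical.

```c
/* Sketch only: pack one DSCP -> TC mapping the same way the loop above does. */
static void example_pack_dscp_tc(u8 *bd0_data, u8 *bd1_data, u8 dscp, u8 tc_id)
{
	u8 *data = dscp < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM ?
		   bd0_data : bd1_data;
	u8 idx = dscp % (HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM);

	/* two 4-bit settings per byte: even DSCPs use bits 3:0, odd ones 7:4 */
	data[idx >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(idx);
}
```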
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
u8 pg_id, u8 pri_bit_map)
{
@@ -281,8 +323,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
- u16 qs_id, u8 pri)
+static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
+ bool link_vld)
{
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_desc desc;
@@ -293,7 +335,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
map->qs_id = cpu_to_le16(qs_id);
map->priority = pri;
- map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
+ map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -303,12 +345,30 @@ static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
{
struct hclge_nq_to_qs_link_cmd *map;
struct hclge_desc desc;
+ u16 qs_id_l;
+ u16 qs_id_h;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
map->nq_id = cpu_to_le16(q_id);
+
+ /* convert qs_id to the following format to support qset_id >= 1024
+ * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
+ * / / \ \
+ * / / \ \
+ * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
+ * | qs_id_h | vld | qs_id_l |
+ */
+ qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
+ HCLGE_TM_QS_ID_L_S);
+ qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
+ HCLGE_TM_QS_ID_H_S);
+ hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
+ qs_id_l);
+ hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
+ qs_id_h);
map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
return hclge_cmd_send(&hdev->hw, &desc, 1);
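
The bit diagram above describes how a logical qset id is re-packed so that bit 10 stays free for the hardware link-valid flag on devices supporting more than 1024 qsets. A rough standalone equivalent of that conversion (helper name is hypothetical):

```c
/* Sketch only: logical qset id -> hardware encoding used above. */
static u16 example_qs_id_to_hw_format(u16 qs_id)
{
	u16 qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				      HCLGE_TM_QS_ID_L_S);
	u16 qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				      HCLGE_TM_QS_ID_H_S);
	u16 hw_id = 0;

	/* move bits 14~10 up to 15~11 so bit 10 can carry the vld flag */
	hnae3_set_field(hw_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(hw_id, HCLGE_TM_QS_ID_H_EXT_MSK,
			HCLGE_TM_QS_ID_H_EXT_S, qs_id_h);

	return hw_id | HCLGE_TM_Q_QS_LINK_VLD_MSK;
}
```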
@@ -378,7 +438,7 @@ static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pg_id,
- u32 shapping_para)
+ u32 shapping_para, u32 rate)
{
struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
@@ -394,38 +454,47 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->pg_rate = cpu_to_le32(rate);
+
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
struct hclge_desc desc;
- u8 ir_u, ir_b, ir_s;
u32 shapping_para;
int ret;
- ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
- HCLGE_SHAPER_LVL_PORT,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
- shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);
+
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pri_id,
- u32 shapping_para)
+ u32 shapping_para, u32 rate)
{
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
@@ -442,6 +511,10 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
+
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -515,25 +588,27 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
- u8 ir_b, ir_u, ir_s;
u32 shaper_para;
int ret, i;
if (!max_tx_rate)
- max_tx_rate = HCLGE_ETHER_MAX_RATE;
+ max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
- &ir_b, &ir_u, &ir_s);
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
false);
@@ -541,10 +616,13 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+ shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
vport->vport_id, shap_cfg_cmd->qs_id,
max_tx_rate, ret);
return ret;
@@ -554,23 +632,72 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
return 0;
}
-static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
+static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
struct hclge_dev *hdev = vport->back;
+ u16 max_rss_size = 0;
+ int i;
+
+ if (!tc_info->mqprio_active)
+ return vport->alloc_tqps / tc_info->num_tc;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
+ continue;
+ if (max_rss_size < tc_info->tqp_count[i])
+ max_rss_size = tc_info->tqp_count[i];
+ }
+
+ return max_rss_size;
+}
+
+static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ struct hclge_dev *hdev = vport->back;
+ int sum = 0;
+ int i;
+
+ if (!tc_info->mqprio_active)
+ return kinfo->rss_size * tc_info->num_tc;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
+ sum += tc_info->tqp_count[i];
+ }
+
+ return sum;
+}
+
+static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u16 vport_max_rss_size;
u16 max_rss_size;
- u8 i;
/* TC configuration is shared by PF/VF in one port, only allow
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
- kinfo->num_tc = vport->vport_id ? 1 :
+ if (vport->vport_id) {
+ kinfo->tc_info.max_tc = 1;
+ kinfo->tc_info.num_tc = 1;
+ vport->qs_offset = HNAE3_MAX_TC +
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM;
+ vport_max_rss_size = hdev->vf_rss_size_max;
+ } else {
+ kinfo->tc_info.max_tc = hdev->tc_max;
+ kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
- vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
- (vport->vport_id ? (vport->vport_id - 1) : 0);
+ vport->qs_offset = 0;
+ vport_max_rss_size = hdev->pf_rss_size_max;
+ }
- max_rss_size = min_t(u16, hdev->rss_size_max,
- vport->alloc_tqps / kinfo->num_tc);
+ max_rss_size = min_t(u16, vport_max_rss_size,
+ hclge_vport_get_max_rss_size(vport));
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
@@ -580,41 +707,42 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
- /* if user not set rss, the rss_size should compare with the
- * valid msi numbers to ensure one to one map between tqp and
- * irq as default.
- */
- if (!kinfo->req_rss_size)
- max_rss_size = min_t(u16, max_rss_size,
- (hdev->num_nic_msi - 1) /
- kinfo->num_tc);
-
/* Set to the maximum specification value (max_rss_size). */
kinfo->rss_size = max_rss_size;
}
+}
+
+static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u8 i;
- kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
+ hclge_tm_update_kinfo_rss_size(vport);
+ kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
- vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ if (vport->vport_id == PF_VPORT_ID)
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
+
+ /* when mqprio is enabled, the tc_info has already been updated. */
+ if (kinfo->tc_info.mqprio_active)
+ return;
+
for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
- kinfo->tc_info[i].enable = true;
- kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
- kinfo->tc_info[i].tqp_count = kinfo->rss_size;
- kinfo->tc_info[i].tc = i;
+ if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
+ kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
+ kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
} else {
 /* Set to default queue if TC is disabled */
- kinfo->tc_info[i].enable = false;
- kinfo->tc_info[i].tqp_offset = 0;
- kinfo->tc_info[i].tqp_count = 1;
- kinfo->tc_info[i].tc = 0;
+ kinfo->tc_info.tqp_offset[i] = 0;
+ kinfo->tc_info.tqp_count[i] = 1;
}
}
- memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
- sizeof_field(struct hnae3_knic_private_info, prio_tc));
+ memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
+ sizeof_field(struct hnae3_tc_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -631,27 +759,27 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
- u8 i;
+ u8 i, tc_sch_mode;
+ u32 bw_limit;
+
+ for (i = 0; i < hdev->tc_max; i++) {
+ if (i < hdev->tm_info.num_tc) {
+ tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ } else {
+ tc_sch_mode = HCLGE_SCH_MODE_SP;
+ bw_limit = 0;
+ }
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i;
- hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
hdev->tm_info.tc_info[i].pgid = 0;
- hdev->tm_info.tc_info[i].bw_limit =
- hdev->tm_info.pg_info[0].bw_limit;
+ hdev->tm_info.tc_info[i].bw_limit = bw_limit;
}
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
-
- /* DCB is enabled if we have more than 1 TC or pfc_en is
- * non-zero.
- */
- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
@@ -668,7 +796,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
- hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
+ hdev->tm_info.pg_info[i].bw_limit =
+ hdev->ae_dev->dev_specs.max_tm_rate;
if (i != 0)
continue;
@@ -676,15 +805,17 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ for (; k < HNAE3_MAX_TC; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
}
}
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
dev_warn(&hdev->pdev->dev,
- "DCB is disable, but last mode is FC_PFC\n");
+ "Only 1 tc used, but last mode is FC_PFC\n");
hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
@@ -697,6 +828,27 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
}
}
+static void hclge_update_fc_mode(struct hclge_dev *hdev)
+{
+ if (!hdev->tm_info.pfc_en) {
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ return;
+ }
+
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+ }
+}
+
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ hclge_update_fc_mode(hdev);
+ else
+ hclge_update_fc_mode_by_dcb_flag(hdev);
+}
+
static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
hclge_tm_pg_info_init(hdev);
@@ -705,7 +857,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
- hclge_pfc_info_init(hdev);
+ hclge_tm_pfc_info_update(hdev);
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -729,7 +881,8 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
u32 i;
@@ -740,11 +893,11 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
/* Pg to pri */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ u32 rate = hdev->tm_info.pg_info[i].bw_limit;
+
/* Calc shaper para */
- ret = hclge_shaper_para_calc(
- hdev->tm_info.pg_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PG,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
@@ -753,16 +906,18 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_C_BUCKET, i,
- shaper_para);
+ shaper_para, rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
- shaper_para);
+ shaper_para, rate);
if (ret)
return ret;
}
@@ -794,15 +949,14 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
struct hnae3_queue **tqp = kinfo->tqp;
- struct hnae3_tc_info *v_tc_info;
u32 i, j;
int ret;
- for (i = 0; i < kinfo->num_tc; i++) {
- v_tc_info = &kinfo->tc_info[i];
- for (j = 0; j < v_tc_info->tqp_count; j++) {
- struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
+ for (i = 0; i < tc_info->num_tc; i++) {
+ for (j = 0; j < tc_info->tqp_count[i]; j++) {
+ struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
ret = hclge_tm_q_to_qs_map_cfg(hdev,
hclge_get_queue_id(q),
@@ -815,38 +969,66 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
return 0;
}
-static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
int ret;
- u32 i, k;
- if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, one by one mapping */
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- struct hnae3_knic_private_info *kinfo =
- &vport[k].nic.kinfo;
-
- for (i = 0; i < kinfo->num_tc; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, i);
- if (ret)
- return ret;
- }
+ /* Cfg qs -> pri mapping, one by one mapping */
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ for (i = 0; i < kinfo->tc_info.max_tc; i++) {
+ u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
+ bool link_vld = i < kinfo->tc_info.num_tc;
+
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ pri, link_vld);
+ if (ret)
+ return ret;
}
- } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
- for (k = 0; k < hdev->num_alloc_vport; k++)
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, k);
- if (ret)
- return ret;
- }
- } else {
- return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
+ int ret;
+
+ /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
+ for (k = 0; k < hdev->num_alloc_vport; k++)
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ k, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
+ else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return ret;
+
/* Cfg q -> qs mapping */
for (i = 0; i < hdev->num_alloc_vport; i++) {
ret = hclge_vport_q_to_qs_map(hdev, vport);
@@ -861,32 +1043,41 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
- u8 ir_u, ir_b, ir_s;
- u32 shaper_para;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
+ u32 shaper_para_c, shaper_para_p;
int ret;
u32 i;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_shaper_para_calc(
- hdev->tm_info.tc_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PRI,
- &ir_b, &ir_u, &ir_s);
- if (ret)
- return ret;
+ for (i = 0; i < hdev->tc_max; i++) {
+ u32 rate = hdev->tm_info.tc_info[i].bw_limit;
+
+ if (rate) {
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ } else {
+ shaper_para_c = 0;
+ shaper_para_p = 0;
+ }
- shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
- shaper_para);
+ shaper_para_c, rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
- shaper_para);
+ shaper_para_p, rate);
if (ret)
return ret;
}
@@ -897,12 +1088,13 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- u8 ir_u, ir_b, ir_s;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
- &ir_b, &ir_u, &ir_s);
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
@@ -910,15 +1102,18 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
- vport->vport_id, shaper_para);
+ vport->vport_id, shaper_para,
+ vport->bw_limit);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
- vport->vport_id, shaper_para);
+ vport->vport_id, shaper_para,
+ vport->bw_limit);
if (ret)
return ret;
@@ -929,15 +1124,15 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 i;
int ret;
- for (i = 0; i < kinfo->num_tc; i++) {
- ret = hclge_shaper_para_calc(
- hdev->tm_info.tc_info[i].bw_limit,
- HCLGE_SHAPER_LVL_QSET,
- &ir_b, &ir_u, &ir_s);
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_QSET,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
}
@@ -992,7 +1187,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
int ret;
u32 i, k;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
pg_info =
&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
dwrr = pg_info->tc_dwrr[i];
@@ -1002,9 +1197,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
return ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ if (i >= kinfo->tc_info.max_tc)
+ continue;
+
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
ret = hclge_tm_qs_weight_cfg(
hdev, vport[k].qs_offset + i,
- vport[k].dwrr);
+ dwrr);
if (ret)
return ret;
}
@@ -1015,7 +1216,6 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
-#define DEFAULT_TC_WEIGHT 1
#define DEFAULT_TC_OFFSET 14
struct hclge_ets_tc_weight_cmd *ets_weight;
@@ -1028,13 +1228,7 @@ static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
for (i = 0; i < HNAE3_MAX_TC; i++) {
struct hclge_pg_info *pg_info;
- ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- pg_info =
- &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
}
@@ -1056,7 +1250,7 @@ static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
return ret;
/* Qset dwrr */
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
ret = hclge_tm_qs_weight_cfg(
hdev, vport->qs_offset + i,
hdev->tm_info.pg_info[0].tc_dwrr[i]);
@@ -1099,7 +1293,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
- "fw %08x does't support ets tc weight cmd\n",
+ "fw %08x doesn't support ets tc weight cmd\n",
hdev->fw_version);
ret = 0;
}
@@ -1122,6 +1316,12 @@ static int hclge_tm_map_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -1173,6 +1373,35 @@ static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
+static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u8 mode;
+ u16 i;
+
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
+
+ if (pri_id >= kinfo->tc_info.max_tc)
+ continue;
+
+ mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
+ HCLGE_SCH_MODE_SP;
+ ret = hclge_tm_qs_schd_mode_cfg(hdev,
+ vport[i].qs_offset + pri_id,
+ mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -1187,7 +1416,7 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
if (ret)
return ret;
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
@@ -1203,21 +1432,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
- u8 i, k;
+ u8 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
+ for (i = 0; i < hdev->tc_max; i++) {
+ ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
if (ret)
return ret;
-
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- ret = hclge_tm_qs_schd_mode_cfg(
- hdev, vport[k].qs_offset + i,
- HCLGE_SCH_MODE_DWRR);
- if (ret)
- return ret;
- }
}
} else {
for (i = 0; i < hdev->num_alloc_vport; i++) {
@@ -1287,15 +1508,23 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
hdev->tm_info.pfc_en);
}
-/* Each Tc has a 1024 queue sets to backpress, it divides to
- * 32 group, each group contains 32 queue sets, which can be
- * represented by u32 bitmap.
+/* The queue sets used for backpressure are divided into several groups;
+ * each group contains 32 queue sets, which can be represented by a u32 bitmap.
*/
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
+ u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
+ u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
+ u8 grp_num = HCLGE_BP_GRP_NUM;
int i;
- for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
+ if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
+ grp_num = HCLGE_BP_EXT_GRP_NUM;
+ grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
+ grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
+ }
+
+ for (i = 0; i < grp_num; i++) {
u32 qs_bitmap = 0;
int k, ret;
@@ -1304,8 +1533,7 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
- grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
- HCLGE_BP_GRP_ID_S);
+ grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
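
To make the group/sub-group split concrete: with the default 32-group layout, bits 9..5 of a qset id select the group and bits 4..0 the bit inside that group's u32 bitmap, while the extended layout widens the group field to bits 10..5. A worked example with an arbitrary qset id:

```c
/* Worked example (illustrative value): qset id 0x2a3 -> group 21, bit 3 */
u16 qs_id = 0x2a3;
u8 grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M, HCLGE_BP_GRP_ID_S);   /* 21 */
u8 sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
			     HCLGE_BP_SUB_GRP_ID_S);                      /* 3 */
u32 qs_bitmap = BIT(sub_grp);	/* contributes bit 3 of group 21's bitmap */
```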
@@ -1355,7 +1583,7 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
- int ret = 0;
+ int ret;
int i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
@@ -1364,7 +1592,7 @@ static int hclge_tm_bp_setup(struct hclge_dev *hdev)
return ret;
}
- return ret;
+ return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
@@ -1410,7 +1638,7 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
for (k = 0; k < hdev->num_alloc_vport; k++) {
kinfo = &vport[k].nic.kinfo;
- kinfo->prio_tc[i] = prio_tc[i];
+ kinfo->tc_info.prio_tc[i] = prio_tc[i];
}
}
}
@@ -1435,19 +1663,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hclge_tm_schd_info_init(hdev);
}
-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
-{
- /* DCB is enabled if we have more than 1 TC or pfc_en is
- * non-zero.
- */
- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
- hclge_pfc_info_init(hdev);
-}
-
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
int ret;
@@ -1478,6 +1693,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return -EINVAL;
hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
@@ -1493,8 +1709,407 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
return 0;
return hclge_tm_bp_setup(hdev);
}
+
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ /* Each PF has 8 qsets and each VF has 1 qset */
+ *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *qset_num = le16_to_cpu(nodes->qset_num);
+ return 0;
+}
+
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ *pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pri num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *pri_num = nodes->pri_num;
+ return 0;
+}
+
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld)
+{
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset map priority, ret = %d\n", ret);
+ return ret;
+ }
+
+ *priority = map->priority;
+ *link_vld = map->link_vld;
+ return 0;
+}
+
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
+{
+ struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
+ qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
+ qs_sch_mode->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = qs_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
+{
+ struct hclge_qs_weight_cmd *qs_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ qs_weight->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = qs_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset %u shaper, ret = %d\n", qset_id,
+ ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
+ return 0;
+}
+
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
+{
+ struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
+ pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
+ pri_sch_mode->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = pri_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ priority_weight->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = priority_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
+ return 0;
+}
+
+int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
+{
+ struct hclge_nq_to_qs_link_cmd *map;
+ struct hclge_desc desc;
+ u16 qs_id_l;
+ u16 qs_id_h;
+ int ret;
+
+ map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
+ map->nq_id = cpu_to_le16(q_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get queue to qset map, ret = %d\n", ret);
+ return ret;
+ }
+ *qset_id = le16_to_cpu(map->qset_id);
+
+ /* convert qset_id to the following format, drop the vld bit
+ * | qs_id_h | vld | qs_id_l |
+ * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
+ * \ \ / /
+ * \ \ / /
+ * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 |
+ */
+ qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
+ HCLGE_TM_QS_ID_L_S);
+ qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
+ HCLGE_TM_QS_ID_H_EXT_S);
+ *qset_id = 0;
+ hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
+ qs_id_l);
+ hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
+ qs_id_h);
+ return 0;
+}
+
+int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
+{
+#define HCLGE_TM_TC_MASK 0x7
+
+ struct hclge_tqp_tx_queue_tc_cmd *tc;
+ struct hclge_desc desc;
+ int ret;
+
+ tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
+ tc->queue_id = cpu_to_le16(q_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get queue to tc map, ret = %d\n", ret);
+ return ret;
+ }
+
+ *tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
+ return 0;
+}
+
+int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
+ u8 *pri_bit_map)
+{
+ struct hclge_pg_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
+ map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+ map->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg to pri map, ret = %d\n", ret);
+ return ret;
+ }
+
+ *pri_bit_map = map->pri_bit_map;
+ return 0;
+}
+
+int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
+{
+ struct hclge_pg_weight_cmd *pg_weight_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
+ pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
+ pg_weight_cmd->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = pg_weight_cmd->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
+ desc.data[0] = cpu_to_le32(pg_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = (u8)le32_to_cpu(desc.data[1]);
+ return 0;
+}
+
+int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_pg_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
+ return 0;
+}
+
+int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get port shaper, ret = %d\n", ret);
+ return ret;
+ }
+
+ port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+ shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = port_shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 45bcb67f90fd..68f28a98e380 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -6,6 +6,12 @@
#include <linux/types.h>
+#include "hnae3.h"
+
+struct hclge_dev;
+struct hclge_vport;
+enum hclge_opcode_type;
+
/* MAC Pause */
#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
@@ -17,7 +23,15 @@
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
-#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
+#define HCLGE_TM_TX_SCHD_SP_MSK 0xFE
+
+#define HCLGE_ETHER_MAX_RATE 100000
+
+#define HCLGE_TM_PF_MAX_PRI_NUM 8
+#define HCLGE_TM_PF_MAX_QSET_NUM 8
+
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
@@ -37,6 +51,12 @@ struct hclge_nq_to_qs_link_cmd {
__le16 nq_id;
__le16 rsvd;
#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10)
+#define HCLGE_TM_QS_ID_L_MSK GENMASK(9, 0)
+#define HCLGE_TM_QS_ID_L_S 0
+#define HCLGE_TM_QS_ID_H_MSK GENMASK(14, 10)
+#define HCLGE_TM_QS_ID_H_S 10
+#define HCLGE_TM_QS_ID_H_EXT_S 11
+#define HCLGE_TM_QS_ID_H_EXT_MSK GENMASK(15, 11)
__le16 qset_id;
};
@@ -57,6 +77,18 @@ struct hclge_priority_weight_cmd {
u8 dwrr;
};
+struct hclge_pri_sch_mode_cfg_cmd {
+ u8 pri_id;
+ u8 rsvd[3];
+ u8 sch_mode;
+};
+
+struct hclge_qs_sch_mode_cfg_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ u8 sch_mode;
+};
+
struct hclge_qs_weight_cmd {
__le16 qs_id;
u8 dwrr;
@@ -84,22 +116,34 @@ enum hclge_shap_bucket {
HCLGE_TM_SHAP_P_BUCKET,
};
+/* setting bit HCLGE_TM_RATE_VLD to 1 means 'rate' is used to config shaping */
+#define HCLGE_TM_RATE_VLD 0
+
struct hclge_pri_shapping_cmd {
u8 pri_id;
u8 rsvd[3];
__le32 pri_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 pri_rate;
};
struct hclge_pg_shapping_cmd {
u8 pg_id;
u8 rsvd[3];
__le32 pg_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 pg_rate;
};
struct hclge_qs_shapping_cmd {
__le16 qs_id;
u8 rsvd[2];
__le32 qs_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 qs_rate;
};
#define HCLGE_BP_GRP_NUM 32
@@ -107,6 +151,11 @@ struct hclge_qs_shapping_cmd {
#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
#define HCLGE_BP_GRP_ID_S 5
#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
+
+#define HCLGE_BP_EXT_GRP_NUM 40
+#define HCLGE_BP_EXT_GRP_ID_S 5
+#define HCLGE_BP_EXT_GRP_ID_M GENMASK(10, 5)
+
struct hclge_bp_to_qs_map_cmd {
u8 tc_id;
u8 rsvd[2];
@@ -137,6 +186,36 @@ struct hclge_pfc_stats_cmd {
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
+ u8 flag;
+ u8 rsvd[3];
+ __le32 port_rate;
+};
+
+struct hclge_shaper_ir_para {
+ u8 ir_b; /* IR_B parameter of IR shaper */
+ u8 ir_u; /* IR_U parameter of IR shaper */
+ u8 ir_s; /* IR_S parameter of IR shaper */
+};
+
+struct hclge_tm_nodes_cmd {
+ u8 pg_base_id;
+ u8 pri_base_id;
+ __le16 qset_base_id;
+ __le16 queue_base_id;
+ u8 pg_num;
+ u8 pri_num;
+ __le16 qset_num;
+ __le16 queue_num;
+};
+
+struct hclge_tm_shaper_para {
+ u32 rate;
+ u8 ir_b;
+ u8 ir_u;
+ u8 ir_s;
+ u8 bs_b;
+ u8 bs_s;
+ u8 flag;
};
#define hclge_tm_set_field(dest, string, val) \
@@ -144,8 +223,8 @@ struct hclge_port_shapping_cmd {
(HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH))
+ hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \
+ HCLGE_TM_SHAP_##string##_LSH)
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_tm_vport_map_update(struct hclge_dev *hdev);
@@ -158,8 +237,34 @@ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
-int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
-int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
+void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
-
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld);
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
+int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
+int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
+int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
+ u8 *pri_bit_map);
+int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight);
+int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode);
+int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
+ struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
new file mode 100644
index 000000000000..8510b88d4982
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2020 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_pf_mbx_get,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_pf_mbx_send,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = le16_to_cpu(req->msg.code);
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGE_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclge_trace
+#include <trace/define_trace.h>
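
For context, a minimal sketch of how such a header is wired up, following the standard kernel tracepoint pattern: exactly one .c file defines CREATE_TRACE_POINTS before including the header, and the generated trace_hclge_pf_mbx_get()/trace_hclge_pf_mbx_send() stubs are then called from the mailbox paths (the handler below is a hypothetical name, not the driver's actual function).

```c
/* Sketch only: conventional use of the trace header from a single .c file. */
#define CREATE_TRACE_POINTS
#include "hclge_trace.h"

static void example_handle_vf_request(struct hclge_dev *hdev,
				      struct hclge_mbx_vf_to_pf_cmd *req)
{
	/* no overhead unless the hns3 trace events are enabled at runtime */
	trace_hclge_pf_mbx_get(hdev, req);
}
```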