Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3vf')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c  |  52
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 148
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h |  17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c  |  30
4 files changed, 209 insertions(+), 38 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 9441b453d38d..71f356fc2446 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
return ring->desc_num - used - 1;
}
+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+ int head)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
+ struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclgevf_desc *desc;
int clean = 0;
u32 head;
- desc = &csq->desc[ntc];
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
+ rmb(); /* Make sure head is ready before touch any data */
+
+ if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EIO;
}
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
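
Taken on its own, the new head validation plus the modular clean count in the hunk above is a small ring-buffer invariant: a hardware-reported head is only trusted if it lies between next_to_clean and next_to_use (with wrap-around), and the number of cleaned descriptors is the modular distance from next_to_clean to head. The following is a minimal, self-contained C sketch of that logic; struct csq_ring, csq_head_is_valid() and the hard-coded head value are illustrative stand-ins for the driver's hclgevf_cmq_ring, hclgevf_is_valid_csq_clean_head() and the value read from HCLGEVF_NIC_CSQ_HEAD_REG, not driver code.

#include <stdio.h>

/* Simplified stand-in for the driver's command-send queue ring. */
struct csq_ring {
	int desc_num;      /* total descriptors in the ring */
	int next_to_use;   /* producer index (driver posts commands here) */
	int next_to_clean; /* consumer index (first descriptor not yet cleaned) */
};

/*
 * A reported head is only plausible if it falls in the region currently
 * owned by hardware, i.e. between next_to_clean and next_to_use,
 * accounting for wrap-around of the producer index.
 */
static int csq_head_is_valid(const struct csq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	/* producer has wrapped past the end of the ring */
	return head >= ntc || head <= ntu;
}

int main(void)
{
	struct csq_ring ring = { .desc_num = 1024,
				 .next_to_use = 8,
				 .next_to_clean = 1000 };
	int head = 4; /* pretend value read back from the CSQ head register */

	if (!csq_head_is_valid(&ring, head)) {
		printf("bogus head %d, command queue should be disabled\n", head);
		return 1;
	}

	/* modular distance gives the number of descriptors just completed */
	int cleaned = (head - ring.next_to_clean + ring.desc_num) % ring.desc_num;
	ring.next_to_clean = head;
	printf("cleaned %d descriptors\n", cleaned);
	return 0;
}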
@@ -321,13 +334,13 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock_bh(&hdev->hw.cmq.crq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
- hdev->arq.count = 0;
+ atomic_set(&hdev->arq.count, 0);
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
hdev->hw.cmq.crq.next_to_clean = 0;
@@ -335,7 +348,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
hclgevf_cmd_init_regs(&hdev->hw);
- spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
@@ -344,8 +357,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
* reset may happen when lower level reset is being processed.
*/
if (hclgevf_is_reset_pending(hdev)) {
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err_cmd_init;
}
/* get firmware version */
@@ -353,13 +366,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- return ret;
+ goto err_cmd_init;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
+
+err_cmd_init:
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ return ret;
}
static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8bc28e6f465f..5d53467ee2d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ u8 resp_msg;
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
+ NULL, 0, true, &resp_msg, sizeof(u8));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get port based vlan state failed %d",
+ ret);
+ return ret;
+ }
+
+ nic->port_base_vlan_state = resp_msg;
+
+ return 0;
+}
+
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
@@ -307,6 +328,26 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
return qid_in_pf;
}
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg[2];
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+ true, resp_msg, sizeof(resp_msg));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get the pf port media type failed %d",
+ ret);
+ return ret;
+ }
+
+ hdev->hw.mac.media_type = resp_msg[0];
+ hdev->hw.mac.module_type = resp_msg[1];
+
+ return 0;
+}
+
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
@@ -404,7 +445,7 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
}
}
-void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING 0
#define HCLGEVF_SUPPORTED 1
@@ -1375,9 +1416,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
case HNAE3_VF_FUNC_RESET:
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
+ hdev->rst_stats.vf_func_rst_cnt++;
break;
case HNAE3_FLR_RESET:
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ hdev->rst_stats.flr_rst_cnt++;
break;
default:
break;
@@ -1400,7 +1443,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
- hdev->reset_count++;
+ hdev->rst_stats.rst_cnt++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
@@ -1426,6 +1469,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
goto err_reset;
}
+ hdev->rst_stats.hw_rst_done_cnt++;
+
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
@@ -1444,6 +1489,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ hdev->rst_stats.rst_done_cnt++;
return ret;
err_reset_lock:
@@ -1455,6 +1501,8 @@ err_reset:
*/
hclgevf_cmd_init(hdev);
dev_err(&hdev->pdev->dev, "failed to reset VF\n");
+ if (hclgevf_is_reset_pending(hdev))
+ hclgevf_reset_task_schedule(hdev);
return ret;
}
@@ -1564,8 +1612,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+ if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
schedule_work(&hdev->rst_service_task);
}
@@ -1603,6 +1650,7 @@ static void hclgevf_service_timer(struct timer_list *t)
mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+ hdev->stats_timer++;
hclgevf_task_schedule(hdev);
}
@@ -1711,7 +1759,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
@@ -1723,9 +1771,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
static void hclgevf_service_task(struct work_struct *work)
{
+ struct hnae3_handle *handle;
struct hclgevf_dev *hdev;
hdev = container_of(work, struct hclgevf_dev, service_task);
+ handle = &hdev->nic;
+
+ if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
+ hclgevf_tqps_update_stats(handle);
+ hdev->stats_timer = 0;
+ }
/* request the link status from the PF. PF would be able to tell VF
* about such updates in future so we might remove this later
@@ -1762,6 +1817,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
*clearval = cmdq_src_reg;
+ hdev->rst_stats.vf_rst_cnt++;
return HCLGEVF_VECTOR0_EVENT_RST;
}
@@ -1814,6 +1870,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
/* get queue configuration from PF */
ret = hclgevf_get_queue_info(hdev);
if (ret)
@@ -1824,6 +1885,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
if (ret)
return ret;
+ ret = hclgevf_get_pf_media_type(hdev);
+ if (ret)
+ return ret;
+
/* get tc configuration from PF */
return hclgevf_get_tc_info(hdev);
}
@@ -1986,8 +2051,10 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < handle->kinfo.num_tqps; i++)
- hclgevf_reset_tqp(handle, i);
+ if (hdev->reset_type != HNAE3_VF_RESET)
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ if (hclgevf_reset_tqp(handle, i))
+ break;
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -2007,9 +2074,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_set_alive(handle, true);
+ if (ret)
+ return ret;
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
- return hclgevf_set_alive(handle, true);
+
+ return 0;
}
static void hclgevf_client_stop(struct hnae3_handle *handle)
@@ -2051,6 +2124,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ if (hdev->keep_alive_timer.function)
+ del_timer_sync(&hdev->keep_alive_timer);
+ if (hdev->keep_alive_task.func)
+ cancel_work_sync(&hdev->keep_alive_task);
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
@@ -2155,6 +2232,23 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
+static void hclgevf_info_show(struct hclgevf_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "VF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %d\n",
+ hdev->hw.mac.media_type);
+
+ dev_info(dev, "VF info end.\n");
+}
+
static int hclgevf_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -2172,6 +2266,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
+
if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -2651,12 +2748,16 @@ static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
return hclgevf_config_gro(hdev, enable);
}
-static void hclgevf_get_media_type(struct hnae3_handle *handle,
- u8 *media_type)
+static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
if (media_type)
*media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
}
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
@@ -2677,7 +2778,7 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- return hdev->reset_count;
+ return hdev->rst_stats.hw_rst_done_cnt;
}
static void hclgevf_get_link_mode(struct hnae3_handle *handle,
@@ -2756,6 +2857,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
}
}
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+
+ rtnl_lock();
+ hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+
+ /* send msg to PF and wait update port based vlan info */
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG,
+ port_base_vlan_info, data_size,
+ false, NULL, 0);
+
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+ rtnl_lock();
+ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+}
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index c128863ee7d0..cc52f54f8c08 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -116,6 +116,8 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_STATS_TIMER_INTERVAL (36)
+
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
@@ -141,6 +143,7 @@ enum hclgevf_states {
struct hclgevf_mac {
u8 media_type;
+ u8 module_type;
u8 mac_addr[ETH_ALEN];
int link;
u8 duplex;
@@ -210,6 +213,15 @@ struct hclgevf_misc_vector {
int vector_irq;
};
+struct hclgevf_rst_stats {
+ u32 rst_cnt; /* the number of reset */
+ u32 vf_func_rst_cnt; /* the number of VF function reset */
+ u32 flr_rst_cnt; /* the number of FLR */
+ u32 vf_rst_cnt; /* the number of VF reset */
+ u32 rst_done_cnt; /* the number of reset completed */
+ u32 hw_rst_done_cnt; /* the number of HW reset completed */
+};
+
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -227,7 +239,7 @@ struct hclgevf_dev {
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
- unsigned long reset_count; /* the number of reset has been done */
+ struct hclgevf_rst_stats rst_stats;
u32 reset_attempts;
u32 fw_version;
@@ -272,6 +284,7 @@ struct hclgevf_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
u32 flag;
+ u32 stats_timer;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
@@ -290,4 +303,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 7dc3c9f79169..30f2e9352cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
- "VF could not get mbx resp(=%d) from PF in %d tries\n",
- hdev->mbx_resp.received_resp, i);
+ "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
+ code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO;
}
@@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev,
- "VF could not match resp code(code0=%d,code1=%d), %d",
+ "VF could not match resp code(code0=%d,code1=%d), %d\n",
code0, code1, mbx_resp->resp_status);
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
+ r_code0, r_code1);
return -EIO;
}
@@ -95,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
+ ~HCLGE_MBX_NEED_RESP_BIT;
req->msg[0] = code;
req->msg[1] = subcode;
memcpy(&req->msg[2], msg_data, msg_len);
@@ -198,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
+ case HLCGE_MBX_PUSH_VLAN_INFO:
/* set this mbx event as pending. This is required as we
* might loose interrupt event when mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -208,7 +214,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* we will drop the async msg if we find ARQ as full
* and continue with next message
*/
- if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ if (atomic_read(&hdev->arq.count) >=
+ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%d)\n",
req->msg[1]);
@@ -220,7 +227,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
memcpy(&msg_q[0], req->msg,
HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
- hdev->arq.count++;
+ atomic_inc(&hdev->arq.count);
hclgevf_mbx_task_schedule(hdev);
@@ -243,8 +250,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
- u16 link_status;
- u16 *msg_q;
+ u16 link_status, state;
+ u16 *msg_q, *vlan_info;
u8 duplex;
u32 speed;
u32 tail;
@@ -272,7 +279,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = le16_to_cpu(msg_q[1]);
memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)le16_to_cpu(msg_q[4]);
- hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
/* update upper layer with new link link status */
hclgevf_update_link_status(hdev, link_status);
@@ -300,6 +306,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_reset_task_schedule(hdev);
break;
+ case HLCGE_MBX_PUSH_VLAN_INFO:
+ state = le16_to_cpu(msg_q[1]);
+ vlan_info = &msg_q[1];
+ hclgevf_update_port_base_vlan_info(hdev, state,
+ (u8 *)vlan_info, 8);
+ break;
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%d) message from arq\n",
@@ -308,7 +320,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
}
hclge_mbx_head_ptr_move_arq(hdev->arq);
- hdev->arq.count--;
+ atomic_dec(&hdev->arq.count);
msg_q = hdev->arq.msg_q[hdev->arq.head];
}
}
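
The mailbox changes above convert hdev->arq.count from a plain integer to an atomic counter because the interrupt-driven handler that queues async messages and the deferred task that drains them run concurrently. The sketch below shows the same producer/consumer counting pattern as a user-space C program using C11 stdatomic; ARQ_MAX_MSG_NUM, arq_enqueue() and arq_dequeue_all() are illustrative names only, and the driver itself uses the kernel's atomic_set()/atomic_read()/atomic_inc()/atomic_dec() helpers rather than stdatomic.

#include <stdatomic.h>
#include <stdio.h>

#define ARQ_MAX_MSG_NUM 1024  /* stands in for HCLGE_MBX_MAX_ARQ_MSG_NUM */

/* count of queued-but-unhandled async messages, shared between contexts */
static atomic_int arq_count = 0;

/* producer side: called when a new async mailbox message arrives */
static int arq_enqueue(void)
{
	if (atomic_load(&arq_count) >= ARQ_MAX_MSG_NUM) {
		printf("Async Q full, dropping msg\n");
		return -1;
	}
	/* ... copy the message into the tail slot of the queue here ... */
	atomic_fetch_add(&arq_count, 1);
	return 0;
}

/* consumer side: called from the deferred mailbox task */
static void arq_dequeue_all(void)
{
	while (atomic_load(&arq_count) > 0) {
		/* ... process the message at the head of the queue here ... */
		atomic_fetch_sub(&arq_count, 1);
	}
}

int main(void)
{
	arq_enqueue();
	arq_enqueue();
	arq_dequeue_all();
	printf("remaining: %d\n", atomic_load(&arq_count));
	return 0;
}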