Diffstat (limited to 'drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c')
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c | 312
1 file changed, 311 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index bc2f87e6cb5d..5078c0c73863 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -153,7 +153,6 @@ enum hinic_mbox_tx_status {
(MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
#define FUNC_ID_OFF_SET_8B 8
-#define FUNC_ID_OFF_SET_10B 10
/* max message counter wait to process for one function */
#define HINIC_MAX_MSG_CNT_TO_PROCESS 10
@@ -189,6 +188,37 @@ enum mbox_aeq_trig_type {
TRIGGER,
};
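+/* Check that the function id embedded in a mailbox payload matches the
+ * source function id reported by hardware, so a VF cannot spoof messages
+ * on behalf of another function.
+ */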
+static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx,
+ const void *buf_in, u16 in_size, u16 offset)
+{
+ u16 func_idx;
+
+ if (in_size < offset + sizeof(func_idx)) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "Receive mailbox msg len: %d less than %d Bytes is invalid\n",
+ in_size, offset);
+ return false;
+ }
+
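+	/* offset has been validated against in_size above, so this read
+	 * stays within the received payload.
+	 */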
+ func_idx = *((u16 *)((u8 *)buf_in + offset));
+
+ if (src_func_idx != func_idx) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "Receive mailbox function id: 0x%x not equal to msg function id: 0x%x\n",
+ src_func_idx, func_idx);
+ return false;
+ }
+
+ return true;
+}
+
+bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ return check_func_id(hwdev, func_idx, buf_in, in_size,
+ FUNC_ID_OFF_SET_8B);
+}
+
/**
* hinic_register_pf_mbox_cb - register mbox callback for pf
* @hwdev: the pointer to hw device
@@ -486,6 +516,111 @@ err_alloc_rcv_mbox_msg:
kfree(rcv_mbox_temp);
}
+static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ struct hinic_set_random_id rand_info = {0};
+ u16 out_size = sizeof(rand_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int ret;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
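+	/* Generate a fresh random id for this VF and program it into the
+	 * management firmware; mailbox messages from the VF are expected
+	 * to carry this id from now on.
+	 */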
+ rand_info.version = HINIC_CMD_VER_FUNC_ID;
+ rand_info.func_idx = func_id;
+ rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif);
+ rand_info.random_id = get_random_u32();
+
+ func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id;
+
+ ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID,
+ &rand_info, sizeof(rand_info),
+ &rand_info, &out_size, HINIC_MGMT_MSG_SYNC);
+ if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rand_info.status) || !out_size || ret) {
+ dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n",
+ ret, rand_info.status, out_size);
+ return -EIO;
+ }
+
+ if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
+ return rand_info.status;
+
+ func_to_func->vf_mbx_old_rand_id[func_id] =
+ func_to_func->vf_mbx_rand_id[func_id];
+
+ return 0;
+}
+
+static void update_random_id_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+ struct hinic_mbox_func_to_func *func_to_func;
+ u16 src = mbox_work->src_func_idx;
+
+ func_to_func = mbox_work->func_to_func;
+
+ if (set_vf_mbox_random_id(func_to_func->hwdev, src))
+ dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n",
+ mbox_work->src_func_idx);
+
+ kfree(mbox_work);
+}
+
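+/* Validate the random id carried in a mailbox message against the id the
+ * PF registered for the sending VF; messages from PF/PPF sources are
+ * trusted and pass unchecked.
+ */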
+static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func,
+ u8 *header)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct hinic_mbox_work *mbox_work = NULL;
+ u64 mbox_header = *((u64 *)header);
+ u16 offset, src;
+ u32 random_id;
+ int vf_in_pf;
+
+ src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random)
+ return true;
+
+ if (!HINIC_IS_PPF(hwdev->hwif)) {
+ offset = hinic_glb_pf_vf_offset(hwdev->hwif);
+ vf_in_pf = src - offset;
+
+ if (vf_in_pf < 1 || vf_in_pf > hwdev->nic_cap.max_vf) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n",
+ src, offset + 1,
+ hwdev->nic_cap.max_vf + offset);
+ return false;
+ }
+ }
+
+ random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN +
+ MBOX_HEADER_SZ));
+
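+	/* The previous random id is accepted as well, so a pending id
+	 * refresh does not invalidate messages already in flight.
+	 */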
+ if (random_id == func_to_func->vf_mbx_rand_id[src] ||
+ random_id == func_to_func->vf_mbx_old_rand_id[src])
+ return true;
+
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "The mailbox random id(0x%x) of func_id(0x%x) doesn't match with pf reservation(0x%x)\n",
+ random_id, src, func_to_func->vf_mbx_rand_id[src]);
+
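+	/* Mismatch: reject the message and schedule an asynchronous
+	 * refresh of this VF's random id.
+	 */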
+ mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+ if (!mbox_work)
+ return false;
+
+ mbox_work->func_to_func = func_to_func;
+ mbox_work->src_func_idx = src;
+
+ INIT_WORK(&mbox_work->work, update_random_id_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return false;
+}
+
void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
@@ -504,6 +639,9 @@ void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
return;
}
+ if (!check_vf_mbox_random_id(func_to_func, header))
+ return;
+
recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
&func_to_func->mbox_send[src] :
&func_to_func->mbox_resp[src];
@@ -650,6 +788,7 @@ wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
if (!wait_for_completion_timeout(done, jif)) {
dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
dump_mox_reg(hwdev);
+ hinic_dump_aeq_info(hwdev);
return -ETIMEDOUT;
}
@@ -897,6 +1036,7 @@ int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
dev_err(&func_to_func->hwif->pdev->dev,
"Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ hinic_dump_aeq_info(func_to_func->hwdev);
err = -ETIMEDOUT;
goto err_send_mbox;
}
@@ -1095,15 +1235,156 @@ static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
send_mbox->wb_paddr);
}
+bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
+ struct vf_cmd_check_handle *cmd_handle,
+ u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, u8 size)
+{
+ u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (cmd == cmd_handle[i].cmd) {
+ if (cmd_handle[i].check_cmd)
+ return cmd_handle[i].check_cmd(hwdev, src_idx,
+ buf_in, in_size);
+ else
+ return true;
+ }
+ }
+
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF Receive VF(%d) unsupported cmd(0x%x)\n",
+ vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd);
+
+ return false;
+}
+
+static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 curr_pg_pfn, wq_block_pfn;
+
+ if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) ||
+ cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES)
+ return false;
+
+ curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET
+ (ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN);
+ wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET
+ (ctxt_info->wq_block_pfn, WQ_BLOCK_PFN);
+ /* VF must use 0-level CLA */
+ if (curr_pg_pfn != wq_block_pfn)
+ return false;
+
+ return true;
+}
+
+static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ return hinic_cmdq_check_vf_ctxt(hwdev, buf_in);
+}
+
+#define HW_CTX_QPS_VALID(hw_ctxt) \
+ ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
+ (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
+ (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \
+ (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \
+ (hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE)
+
+static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt)
+{
+ if (HW_CTX_QPS_VALID(hw_ctxt))
+ return true;
+
+ if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&
+ !hw_ctxt->rx_buf_sz_idx)
+ return true;
+
+ return false;
+}
+
+static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
+ return false;
+
+ if (hw_ctxt->set_cmdq_depth) {
+ if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH)
+ return true;
+
+ return false;
+ }
+
+ return hw_ctxt_qps_param_valid(hw_ctxt);
+}
+
+static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_wq_page_size *page_size_info = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
+ return false;
+
+ if (((1U << page_size_info->page_size) * SZ_4K) !=
+ HINIC_DEFAULT_WQ_PAGE_SIZE)
+ return false;
+
+ return true;
+}
+
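+/* Management commands a VF is allowed to send to the PF, each paired with
+ * a validator for the request payload.
+ */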
+static struct vf_cmd_check_handle hw_cmd_support_vf[] = {
+ {HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt},
+ {HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt},
+ {HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt},
+ {HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt},
+ {HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size},
+};
+
static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
u16 in_size, void *buf_out, u16 *out_size)
{
+ u8 size = ARRAY_SIZE(hw_cmd_support_vf);
struct hinic_hwdev *hwdev = handle;
struct hinic_pfhwdev *pfhwdev;
int err = 0;
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
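+	/* Reject any VF request whose command is not whitelisted or whose
+	 * payload fails validation.
+	 */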
+ if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
+ buf_in, in_size, size)) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF Receive VF: %d common cmd: 0x%x or mbox len: 0x%x is invalid\n",
+ vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd,
+ in_size);
+ return HINIC_MBOX_VF_CMD_ERROR;
+ }
+
if (cmd == HINIC_COMM_CMD_START_FLR) {
*out_size = 0;
} else {
@@ -1208,3 +1489,32 @@ void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
kfree(func_to_func);
}
+
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
+{
+ u16 vf_offset;
+ u8 vf_in_pf;
+ int err = 0;
+
+ if (HINIC_IS_VF(hwdev->hwif))
+ return 0;
+
+ vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif);
+
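+	/* Install an initial random id for every VF that can sit behind
+	 * this PF.
+	 */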
+ for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) {
+ err = set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf);
+ if (err)
+ break;
+ }
+
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ hwdev->func_to_func->support_vf_random = false;
+ err = 0;
+ dev_warn(&hwdev->hwif->pdev->dev, "Mgmt is unsupported to set VF%d random id\n",
+ vf_in_pf - 1);
+ } else if (!err) {
+ hwdev->func_to_func->support_vf_random = true;
+ }
+
+ return err;
+}