Diffstat (limited to 'drivers/infiniband/hw/hns')
23 files changed, 1639 insertions, 7421 deletions
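The largest functional change in this series, alongside the removal of the hw v1 (HIP06) code, is the mailbox refactor in hns_roce_cmd.c: the six loose arguments (in_param/out_param/in_modifier/op_modifier/op plus token/event) are collapsed into a single struct hns_roce_mbox_msg that is packed once and handed to the hardware ops. Below is a condensed, self-contained sketch of that dispatch flow. The struct layout, CMD_POLL_TOKEN, and the poll/event split mirror the patch; the two *_stub() hooks are assumptions standing in for the driver's post_mbox / poll_mbox_done / completion-wait plumbing.

/*
 * Sketch of the mailbox dispatch this series converges on.
 * The *_stub() hooks are placeholders, not real driver code.
 */
#include <stdint.h>

#define CMD_POLL_TOKEN 0xffff

struct hns_roce_mbox_msg {
	uint64_t in_param;
	uint64_t out_param;
	uint8_t  cmd;      /* replaces the old op + op_modifier pair */
	uint32_t tag;      /* replaces in_modifier (a CQN, QPN, ...) */
	uint16_t token;
	uint8_t  event_en; /* 1: event (wait) mode, 0: poll mode */
};

int mbox_post_and_poll_stub(struct hns_roce_mbox_msg *msg);
int mbox_post_and_wait_stub(struct hns_roce_mbox_msg *msg);

/* One packing site instead of six arguments threaded everywhere. */
int cmd_mbox_sketch(int use_events, uint64_t in_param, uint64_t out_param,
		    uint8_t cmd, unsigned long tag)
{
	struct hns_roce_mbox_msg mbox_msg = {0};

	mbox_msg.in_param = in_param;
	mbox_msg.out_param = out_param;
	mbox_msg.cmd = cmd;
	mbox_msg.tag = tag;

	if (use_events) {
		/* token is assigned from a per-command context later;
		 * completion arrives via hns_roce_cmd_event() */
		mbox_msg.event_en = 1;
		return mbox_post_and_wait_stub(&mbox_msg);
	}

	mbox_msg.event_en = 0;
	mbox_msg.token = CMD_POLL_TOKEN;
	return mbox_post_and_poll_stub(&mbox_msg);
}

Note the design choice visible in the diff: the fixed timeout argument disappears from the public API, and the wait path uses HNS_ROCE_CMD_TIMEOUT_MSECS internally.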
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 18d10ebf900b..ab3fbba70789 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -5,22 +5,9 @@ config INFINIBAND_HNS depends on ARM64 || (COMPILE_TEST && 64BIT) depends on (HNS_DSAF && HNS_ENET) || HNS3 help - This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine - is used in Hisilicon Hip06 and more further ICT SoC based on - platform device. + This is a RoCE/RDMA driver for the Hisilicon RoCE engine. - To compile HIP06 or HIP08 driver as module, choose M here. - -config INFINIBAND_HNS_HIP06 - bool "Hisilicon Hip06 Family RoCE support" - depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET - depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y) - help - RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and - Hip07 SoC. These RoCE engines are platform devices. - - To compile this driver, choose Y here: if INFINIBAND_HNS is m, this - module will be called hns-roce-hw-v1 + To compile HIP08 driver as module, choose M here. config INFINIBAND_HNS_HIP08 bool "Hisilicon Hip08 Family RoCE support" diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index e105945b94a1..a7d259238305 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -9,12 +9,7 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o -ifdef CONFIG_INFINIBAND_HNS_HIP06 -hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o -endif - ifdef CONFIG_INFINIBAND_HNS_HIP08 -hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) +hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs) obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o endif diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index cc258edec331..480c062dd04f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -30,7 +30,6 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <linux/pci.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -42,9 +41,8 @@ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr) u16 sport; if (!fl) - sport = get_random_u32() % - (IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 - - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) + + sport = prandom_u32_max(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 - + IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) + IB_ROCE_UDP_ENCAP_VALID_PORT_MIN; else sport = rdma_flow_label_to_udp_sport(fl); @@ -61,7 +59,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct hns_roce_ah *ah = to_hr_ah(ibah); int ret = 0; - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata) + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) return -EOPNOTSUPP; ah->av.port = rdma_ah_get_port_num(ah_attr); @@ -80,7 +78,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); /* HIP08 needs to record vlan info in Address Vector */ - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, &ah->av.vlan_id, NULL); if (ret) diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index d4fa0fd52294..11a78ceae568 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -31,10 +31,9 @@ * SOFTWARE. */ -#include <linux/platform_device.h> #include <linux/vmalloc.h> -#include "hns_roce_device.h" #include <rdma/ib_umem.h> +#include "hns_roce_device.h" void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf) { diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 84f3f2b5f097..864413607571 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -31,7 +31,6 @@ */ #include <linux/dmapool.h> -#include <linux/platform_device.h> #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_cmd.h" @@ -39,45 +38,36 @@ #define CMD_POLL_TOKEN 0xffff #define CMD_MAX_NUM 32 -static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, - u8 op_modifier, u16 op, u16 token, - int event) +static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { - return hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, token, event); + return hr_dev->hw->post_mbox(hr_dev, mbox_msg); } /* this should be called with "poll_sem" */ -static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, - unsigned int timeout) +static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; - ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - CMD_POLL_TOKEN, 0); + ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg); if (ret) { dev_err_ratelimited(hr_dev->dev, - "failed to post mailbox %x in poll mode, ret = %d.\n", - op, ret); + "failed to post mailbox 0x%x in poll mode, ret = %d.\n", + mbox_msg->cmd, ret); return ret; } - return hr_dev->hw->poll_mbox_done(hr_dev, timeout); + return hr_dev->hw->poll_mbox_done(hr_dev); } -static int hns_roce_cmd_mbox_poll(struct hns_roce_dev 
*hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, unsigned int timeout) +static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; down(&hr_dev->cmd.poll_sem); - ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, timeout); + ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg); up(&hr_dev->cmd.poll_sem); return ret; @@ -91,7 +81,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, if (unlikely(token != context->token)) { dev_err_ratelimited(hr_dev->dev, - "[cmd] invalid ae token %x,context token is %x!\n", + "[cmd] invalid ae token 0x%x, context token is 0x%x.\n", token, context->token); return; } @@ -101,10 +91,8 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, complete(&context->done); } -static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, - unsigned int timeout) +static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { struct hns_roce_cmdq *cmd = &hr_dev->cmd; struct hns_roce_cmd_context *context; @@ -125,66 +113,70 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, reinit_completion(&context->done); - ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - context->token, 1); + mbox_msg->token = context->token; + ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg); if (ret) { dev_err_ratelimited(dev, - "failed to post mailbox %x in event mode, ret = %d.\n", - op, ret); + "failed to post mailbox 0x%x in event mode, ret = %d.\n", + mbox_msg->cmd, ret); goto out; } if (!wait_for_completion_timeout(&context->done, - msecs_to_jiffies(timeout))) { - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n", - context->token, op); + msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS))) { + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", + context->token, mbox_msg->cmd); ret = -EBUSY; goto out; } ret = context->result; if (ret) - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n", - context->token, op, ret); + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", + context->token, mbox_msg->cmd, ret); out: context->busy = 0; return ret; } -static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, unsigned int timeout) +static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; down(&hr_dev->cmd.event_sem); - ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, timeout); + ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg); up(&hr_dev->cmd.event_sem); return ret; } int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, - unsigned long in_modifier, u8 op_modifier, u16 op, - unsigned int timeout) + u8 cmd, unsigned long tag) { + struct hns_roce_mbox_msg mbox_msg = {}; bool is_busy; if (hr_dev->hw->chk_mbox_avail) if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy)) return is_busy ? 
-EBUSY : 0; - if (hr_dev->cmd.use_events) - return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); - else - return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); + mbox_msg.in_param = in_param; + mbox_msg.out_param = out_param; + mbox_msg.cmd = cmd; + mbox_msg.tag = tag; + + if (hr_dev->cmd.use_events) { + mbox_msg.event_en = 1; + + return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg); + } else { + mbox_msg.event_en = 0; + mbox_msg.token = CMD_POLL_TOKEN; + + return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg); + } } int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) @@ -270,3 +262,15 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma); kfree(mailbox); } + +int hns_roce_create_hw_ctx(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + u8 cmd, unsigned long idx) +{ + return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx); +} + +int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, unsigned long idx) +{ + return hns_roce_cmd_mbox(dev, 0, 0, cmd, idx); +} diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h index 8025e7f657fa..052a3d60905a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.h +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h @@ -140,12 +140,16 @@ enum { }; int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, - unsigned long in_modifier, u8 op_modifier, u16 op, - unsigned int timeout); + u8 cmd, unsigned long tag); struct hns_roce_cmd_mailbox * hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev); void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, struct hns_roce_cmd_mailbox *mailbox); +int hns_roce_create_hw_ctx(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + u8 cmd, unsigned long idx); +int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, + unsigned long idx); #endif /* _HNS_ROCE_CMD_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h index b73e55de83ac..465d1f914b6c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_common.h +++ b/drivers/infiniband/hw/hns/hns_roce_common.h @@ -104,208 +104,6 @@ #define hr_reg_read(ptr, field) _hr_reg_read(ptr, field) -#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3 -#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4 - -#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5 - -#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6 - -#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10 -#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \ - (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S) - -#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16 - -#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0 -#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \ - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S) - -#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24 -#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \ - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S) - -#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0 -#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \ - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S) - -#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24 -#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \ - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S) - -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0 -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \ - (((1UL << 16) - 1) 
<< ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S) - -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16 -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \ - (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S) - -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0 -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \ - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S) - -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16 -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \ - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S) - -#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0 -#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \ - (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0 -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \ - (((1UL << 15) - 1) << \ - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16 -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \ - (((1UL << 4) - 1) << \ - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20 - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21 - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S) - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S) - -#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0 -#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S) - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S) - -#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0 -#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S) - -#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8 -#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \ - (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19 - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \ - (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31 - -#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0 -#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \ - (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S) - -#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0 -#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \ - (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S) - -#define ROCEE_MB6_ROCEE_MB_CMD_S 0 -#define ROCEE_MB6_ROCEE_MB_CMD_M \ - (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S) - -#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8 -#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \ - (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S) - -#define ROCEE_MB6_ROCEE_MB_EVENT_S 14 - -#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15 - -#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16 -#define ROCEE_MB6_ROCEE_MB_TOKEN_M \ - (((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S) - -#define 
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \ - (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \ - (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \ - (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31 - -#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0 -#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \ - (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S) - -#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16 -#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \ - (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0 -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \ - (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8 -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \ - (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17 - -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0 -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \ - (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S) - -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16 -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \ - (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S) - -#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0 -#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \ - (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S) - -#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16 -#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1 -#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0 - -#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0 -#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1 - -#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0 - -#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0 -#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \ - (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) - -#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0 -#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \ - (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) - -#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0 -#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \ - (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) - -#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0 -#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \ - (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S) - -#define ROCEE_SDB_CNT_CMP_BITS 16 - -#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20 - -#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0 - /*************ROCEE_REG DEFINITION****************/ #define ROCEE_VENDOR_ID_REG 0x0 #define ROCEE_VENDOR_PART_ID_REG 0x4 diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index d763f097599f..736dc2f993b4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -30,7 +30,6 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <rdma/ib_umem.h> #include <rdma/uverbs_ioctl.h> #include "hns_roce_device.h" @@ -101,12 +100,39 @@ static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn) mutex_unlock(&cq_table->bank_mutex); } +static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, + u64 *mtts, dma_addr_t dma_handle) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) { + ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n"); + return PTR_ERR(mailbox); + } + + hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); + + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC, + hr_cq->cqn); + if (ret) + ibdev_err(ibdev, + "failed to send create cmd for CQ(0x%lx), ret = %d.\n", + hr_cq->cqn, ret); + + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) { struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_cmd_mailbox *mailbox; - u64 mtts[MTT_MIN_COUNT] = { 0 }; + u64 mtts[MTT_MIN_COUNT] = {}; dma_addr_t dma_handle; int ret; @@ -122,7 +148,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) if (ret) { ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n", hr_cq->cqn, ret); - goto err_out; + return ret; } ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL)); @@ -131,41 +157,17 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) goto err_put; } - /* Allocate mailbox memory */ - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) { - ret = PTR_ERR(mailbox); - goto err_xa; - } - - hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); - - /* Send mailbox to hw */ - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0, - HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS); - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - if (ret) { - ibdev_err(ibdev, - "failed to send create cmd for CQ(0x%lx), ret = %d.\n", - hr_cq->cqn, ret); + ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle); + if (ret) goto err_xa; - } - - hr_cq->cons_index = 0; - hr_cq->arm_sn = 1; - - refcount_set(&hr_cq->refcount, 1); - init_completion(&hr_cq->free); return 0; err_xa: xa_erase(&cq_table->array, hr_cq->cqn); - err_put: hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); -err_out: return ret; } @@ -175,9 +177,8 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) struct device *dev = hr_dev->dev; int ret; - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1, - HNS_ROCE_CMD_DESTROY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC, + hr_cq->cqn); if (ret) dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret, hr_cq->cqn); @@ -406,15 +407,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, goto err_cqn; } - /* - * For the QP created by kernel space, tptr value should be initialized - * to zero; For the QP created by user space, it will cause synchronous - * problems if tptr is set to zero here, so we initialize it in user - * space. 
- */ - if (!udata && hr_cq->tptr_addr) - *hr_cq->tptr_addr = 0; - if (udata) { resp.cqn = hr_cq->cqn; ret = ib_copy_to_udata(udata, &resp, @@ -423,6 +415,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, goto err_cqc; } + hr_cq->cons_index = 0; + hr_cq->arm_sn = 1; + refcount_set(&hr_cq->refcount, 1); + init_completion(&hr_cq->free); + return 0; err_cqc: @@ -441,9 +438,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); - if (hr_dev->hw->destroy_cq) - hr_dev->hw->destroy_cq(ib_cq, udata); - free_cqc(hr_dev, hr_cq); free_cqn(hr_dev, hr_cq->cqn); free_cq_db(hr_dev, hr_cq, udata); @@ -460,7 +454,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) hr_cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1)); if (!hr_cq) { - dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n", + dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n", cqn); return; } @@ -481,14 +475,14 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type) hr_cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1)); if (!hr_cq) { - dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn); + dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn); return; } if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID && event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR && event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) { - dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n", + dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n", event_type, cqn); return; } diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 751470c7a2ce..5c4c0480832b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -4,7 +4,6 @@ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 
*/ -#include <linux/platform_device.h> #include <rdma/ib_umem.h> #include "hns_roce_device.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 43e17d61cb63..723e55a7de8d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -36,36 +36,18 @@ #include <rdma/ib_verbs.h> #include <rdma/hns-abi.h> -#define DRV_NAME "hns_roce" - #define PCI_REVISION_ID_HIP08 0x21 #define PCI_REVISION_ID_HIP09 0x30 -#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6') - #define HNS_ROCE_MAX_MSG_LEN 0x80000000 #define HNS_ROCE_IB_MIN_SQ_STRIDE 6 #define BA_BYTE_LEN 8 -/* Hardware specification only for v1 engine */ #define HNS_ROCE_MIN_CQE_NUM 0x40 -#define HNS_ROCE_MIN_WQE_NUM 0x20 #define HNS_ROCE_MIN_SRQ_WQE_NUM 1 -/* Hardware specification only for v1 engine */ -#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7 -#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000 - -#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20 -#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \ - (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS) -#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 -#define HNS_ROCE_MIN_CQE_CNT 16 - -#define HNS_ROCE_RESERVED_SGE 1 - #define HNS_ROCE_MAX_IRQ_NUM 128 #define HNS_ROCE_SGE_IN_WQE 2 @@ -102,18 +84,12 @@ #define HNS_ROCE_FRMR_MAX_PA 512 #define PKEY_ID 0xffff -#define GUID_LEN 8 #define NODE_DESC_SIZE 64 #define DB_REG_OFFSET 0x1000 /* Configure to HW for PAGE_SIZE larger than 4KB */ #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12) -#define PAGES_SHIFT_8 8 -#define PAGES_SHIFT_16 16 -#define PAGES_SHIFT_24 24 -#define PAGES_SHIFT_32 32 - #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 #define SRQ_DB_REG 0x230 @@ -122,11 +98,6 @@ #define CQ_BANKID_SHIFT 2 -/* The chip implementation of the consumer index is calculated - * according to twice the actual EQ depth - */ -#define EQ_DEPTH_COEFF 2 - enum { SERV_TYPE_RC, SERV_TYPE_UC, @@ -135,16 +106,6 @@ enum { SERV_TYPE_XRC = 5, }; -enum hns_roce_qp_state { - HNS_ROCE_QP_STATE_RST, - HNS_ROCE_QP_STATE_INIT, - HNS_ROCE_QP_STATE_RTR, - HNS_ROCE_QP_STATE_RTS, - HNS_ROCE_QP_STATE_SQD, - HNS_ROCE_QP_STATE_ERR, - HNS_ROCE_QP_NUM_STATE, -}; - enum hns_roce_event { HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01, HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02, @@ -168,8 +129,6 @@ enum hns_roce_event { HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17, }; -#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12 - enum { HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0), HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1), @@ -182,6 +141,7 @@ enum { HNS_ROCE_CAP_FLAG_FRMR = BIT(8), HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), + HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12), HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14), HNS_ROCE_CAP_FLAG_STASH = BIT(17), }; @@ -227,7 +187,7 @@ struct hns_roce_uar { enum hns_roce_mmap_type { HNS_ROCE_MMAP_TYPE_DB = 1, - HNS_ROCE_MMAP_TYPE_TPTR, + HNS_ROCE_MMAP_TYPE_DWQE, }; struct hns_user_mmap_entry { @@ -242,7 +202,6 @@ struct hns_roce_ucontext { struct list_head page_list; struct mutex page_mutex; struct hns_user_mmap_entry *db_mmap_entry; - struct hns_user_mmap_entry *tptr_mmap_entry; }; struct hns_roce_pd { @@ -281,7 +240,6 @@ struct hns_roce_hem_table { /* Single obj size */ unsigned long obj_size; unsigned long table_chunk_size; - int lowmem; struct mutex mutex; struct hns_roce_hem **hem; u64 **bt_l1; @@ -345,19 +303,16 @@ struct hns_roce_mw { u32 pbl_buf_pg_sz; }; -/* Only support 4K page size for mr register */ -#define MR_SIZE_4K 0 - struct hns_roce_mr { struct ib_mr ibmr; u64 iova; /* MR's virtual original addr */ 
u64 size; /* Address range of MR */ u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ - u32 access; /* Access permission of MR */ + u32 access; /* Access permission of MR */ int enabled; /* MR's active status */ - int type; /* MR's register type */ - u32 pbl_hop_num; /* multi-hop number */ + int type; /* MR's register type */ + u32 pbl_hop_num; /* multi-hop number */ struct hns_roce_mtr pbl_mtr; u32 npages; dma_addr_t *page_list; @@ -374,17 +329,17 @@ struct hns_roce_wq { u32 wqe_cnt; /* WQE num */ u32 max_gs; u32 rsv_sge; - int offset; - int wqe_shift; /* WQE size */ + u32 offset; + u32 wqe_shift; /* WQE size */ u32 head; u32 tail; void __iomem *db_reg; }; struct hns_roce_sge { - unsigned int sge_cnt; /* SGE num */ - int offset; - int sge_shift; /* SGE size */ + unsigned int sge_cnt; /* SGE num */ + u32 offset; + u32 sge_shift; /* SGE size */ }; struct hns_roce_buf_list { @@ -453,7 +408,6 @@ struct hns_roce_cq { u32 cons_index; u32 *set_ci_db; void __iomem *db_reg; - u16 *tptr_addr; int arm_sn; int cqe_size; unsigned long cqn; @@ -468,7 +422,7 @@ struct hns_roce_cq { struct hns_roce_idx_que { struct hns_roce_mtr mtr; - int entry_shift; + u32 entry_shift; unsigned long *bitmap; u32 head; u32 tail; @@ -480,7 +434,7 @@ struct hns_roce_srq { u32 wqe_cnt; int max_gs; u32 rsv_sge; - int wqe_shift; + u32 wqe_shift; u32 cqn; u32 xrcdn; void __iomem *db_reg; @@ -539,10 +493,6 @@ struct hns_roce_srq_table { struct hns_roce_hem_table table; }; -struct hns_roce_raq_table { - struct hns_roce_buf_list *e_raq_buf; -}; - struct hns_roce_av { u8 port; u8 gid_index; @@ -572,6 +522,11 @@ struct hns_roce_cmd_context { u16 busy; }; +enum hns_roce_cmdq_state { + HNS_ROCE_CMDQ_STATE_NORMAL, + HNS_ROCE_CMDQ_STATE_FATAL_ERR, +}; + struct hns_roce_cmdq { struct dma_pool *pool; struct semaphore poll_sem; @@ -591,6 +546,7 @@ struct hns_roce_cmdq { * close device, switch into poll mode(non event mode) */ u8 use_events; + enum hns_roce_cmdq_state state; }; struct hns_roce_cmd_mailbox { @@ -598,6 +554,15 @@ struct hns_roce_cmd_mailbox { dma_addr_t dma; }; +struct hns_roce_mbox_msg { + u64 in_param; + u64 out_param; + u8 cmd; + u32 tag; + u16 token; + u8 event_en; +}; + struct hns_roce_dev; struct hns_roce_rinl_sge { @@ -627,17 +592,12 @@ struct hns_roce_work { u32 queue_num; }; -enum { - HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5), -}; - struct hns_roce_qp { struct ib_qp ibqp; struct hns_roce_wq rq; struct hns_roce_db rdb; struct hns_roce_db sdb; unsigned long en_flags; - u32 doorbell_qpn; enum ib_sig_type sq_signal_bits; struct hns_roce_wq sq; @@ -650,9 +610,7 @@ struct hns_roce_qp { u8 sl; u8 resp_depth; u8 state; - u32 access_flags; u32 atomic_rd_en; - u32 pkey_index; u32 qkey; void (*event)(struct hns_roce_qp *qp, enum hns_roce_event event_type); @@ -667,14 +625,16 @@ struct hns_roce_qp { u32 next_sge; enum ib_mtu path_mtu; u32 max_inline_data; + u8 free_mr_en; /* 0: flush needed, 1: unneeded */ unsigned long flush_flag; struct hns_roce_work flush_work; struct hns_roce_rinl_buf rq_inl_buf; - struct list_head node; /* all qps are on a list */ - struct list_head rq_node; /* all recv qps are on a list */ - struct list_head sq_node; /* all send qps are on a list */ + struct list_head node; /* all qps are on a list */ + struct list_head rq_node; /* all recv qps are on a list */ + struct list_head sq_node; /* all send qps are on a list */ + struct hns_user_mmap_entry *dwqe_mmap_entry; }; struct hns_roce_ib_iboe { @@ -684,16 +644,16 @@ struct hns_roce_ib_iboe { u8 phy_port[HNS_ROCE_MAX_PORTS]; }; -enum { - 
HNS_ROCE_EQ_STAT_INVALID = 0, - HNS_ROCE_EQ_STAT_VALID = 2, -}; - struct hns_roce_ceqe { __le32 comp; __le32 rsv[15]; }; +#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l) + +#define CEQE_CQN CEQE_FIELD_LOC(23, 0) +#define CEQE_OWNER CEQE_FIELD_LOC(31, 31) + struct hns_roce_aeqe { __le32 asyn; union { @@ -713,6 +673,13 @@ struct hns_roce_aeqe { __le32 rsv[12]; }; +#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l) + +#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0) +#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8) +#define AEQE_OWNER AEQE_FIELD_LOC(31, 31) +#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32) + struct hns_roce_eq { struct hns_roce_dev *hr_dev; void __iomem *db_reg; @@ -720,12 +687,9 @@ struct hns_roce_eq { int type_flag; /* Aeq:1 ceq:0 */ int eqn; u32 entries; - u32 log_entries; int eqe_size; int irq; - int log_page_size; u32 cons_index; - struct hns_roce_buf_list *buf_list; int over_ignore; int coalesce; int arm_st; @@ -740,7 +704,6 @@ struct hns_roce_eq { struct hns_roce_eq_table { struct hns_roce_eq *eq; - void __iomem **eqc_base; /* only for hw v1 */ }; enum cong_type { @@ -761,19 +724,17 @@ struct hns_roce_caps { u32 max_sq_sg; u32 max_sq_inline; u32 max_rq_sg; - u32 max_extend_sg; + u32 rsv0; u32 num_qps; u32 num_pi_qps; u32 reserved_qps; - int num_qpc_timer; - int num_cqc_timer; - int num_srqs; + u32 num_srqs; u32 max_wqes; u32 max_srq_wrs; u32 max_srq_sges; u32 max_sq_desc_sz; u32 max_rq_desc_sz; - u32 max_srq_desc_sz; + u32 rsv2; int max_qp_init_rdma; int max_qp_dest_rdma; u32 num_cqs; @@ -781,12 +742,12 @@ struct hns_roce_caps { u32 min_cqes; u32 min_wqes; u32 reserved_cqs; - int reserved_srqs; + u32 reserved_srqs; int num_aeq_vectors; int num_comp_vectors; int num_other_vectors; u32 num_mtpts; - u32 num_mtt_segs; + u32 rsv1; u32 num_srqwqe_segs; u32 num_idx_segs; int reserved_mrws; @@ -855,7 +816,7 @@ struct hns_roce_caps { u32 cqc_timer_ba_pg_sz; u32 cqc_timer_buf_pg_sz; u32 cqc_timer_hop_num; - u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ u32 cqe_buf_pg_sz; u32 cqe_hop_num; u32 srqwqe_ba_pg_sz; @@ -874,7 +835,7 @@ struct hns_roce_caps { u32 gmv_hop_num; u32 sl_num; u32 llm_buf_pg_sz; - u32 chunk_sz; /* chunk size in non multihop mode */ + u32 chunk_sz; /* chunk size in non multihop mode */ u64 flags; u16 default_ceq_max_cnt; u16 default_ceq_period; @@ -885,11 +846,6 @@ struct hns_roce_caps { enum cong_type cong_type; }; -struct hns_roce_dfx_hw { - int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn, - int *buffer); -}; - enum hns_roce_device_state { HNS_ROCE_DEVICE_STATE_INITED, HNS_ROCE_DEVICE_STATE_RST_DOWN, @@ -897,26 +853,21 @@ enum hns_roce_device_state { }; struct hns_roce_hw { - int (*reset)(struct hns_roce_dev *hr_dev, bool enable); int (*cmq_init)(struct hns_roce_dev *hr_dev); void (*cmq_exit)(struct hns_roce_dev *hr_dev); int (*hw_profile)(struct hns_roce_dev *hr_dev); int (*hw_init)(struct hns_roce_dev *hr_dev); void (*hw_exit)(struct hns_roce_dev *hr_dev); - int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, - u16 token, int event); - int (*poll_mbox_done)(struct hns_roce_dev *hr_dev, - unsigned int timeout); + int (*post_mbox)(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg); + int (*poll_mbox_done)(struct hns_roce_dev *hr_dev); bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy); - int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index, + 
int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr); int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, const u8 *addr); - void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, - enum ib_mtu mtu); int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, - struct hns_roce_mr *mr, unsigned long mtpt_idx); + struct hns_roce_mr *mr); int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, int flags, void *mb_buf); @@ -927,34 +878,33 @@ struct hns_roce_hw { struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle); int (*set_hem)(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, int step_idx); + struct hns_roce_hem_table *table, int obj, u32 step_idx); int (*clear_hem)(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, - int step_idx); + u32 step_idx); int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state); int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); - int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, - struct ib_udata *udata); - int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata); + void (*dereg_mr)(struct hns_roce_dev *hr_dev); int (*init_eq)(struct hns_roce_dev *hr_dev); void (*cleanup_eq)(struct hns_roce_dev *hr_dev); int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf); + int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer); + int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer); + int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer); const struct ib_device_ops *hns_roce_dev_ops; const struct ib_device_ops *hns_roce_dev_srq_ops; }; struct hns_roce_dev { struct ib_device ib_dev; - struct platform_device *pdev; struct pci_dev *pci_dev; struct device *dev; struct hns_roce_uar priv_uar; const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; spinlock_t sm_lock; - spinlock_t bt_cmd_lock; bool active; bool is_reset; bool dis_db; @@ -1001,15 +951,14 @@ struct hns_roce_dev { int loop_idc; u32 sdb_offset; u32 odb_offset; - dma_addr_t tptr_dma_addr; /* only for hw v1 */ - u32 tptr_size; /* only for hw v1 */ const struct hns_roce_hw *hw; void *priv; struct workqueue_struct *irq_workq; - const struct hns_roce_dfx_hw *dfx; + struct work_struct ecc_work; u32 func_num; u32 is_vf; u32 cong_algo_tmpl_id; + u64 dwqe_page; }; static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) @@ -1158,7 +1107,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev); /* hns roce hw need current block and next block addr from mtt */ #define MTT_MIN_COUNT 2 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct hns_roce_buf_attr *buf_attr, unsigned int page_shift, struct ib_udata *udata, @@ -1205,9 +1154,6 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); -int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index); unsigned long key_to_hw_index(u32 key); int 
hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata); @@ -1245,7 +1191,6 @@ void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n); void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n); bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, struct ib_cq *ib_cq); -enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state); void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, @@ -1277,8 +1222,12 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index); void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); int hns_roce_init(struct hns_roce_dev *hr_dev); void hns_roce_exit(struct hns_roce_dev *hr_dev); -int hns_roce_fill_res_cq_entry(struct sk_buff *msg, - struct ib_cq *ib_cq); +int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq); +int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq); +int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp); +int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp); +int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr); +int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr); struct hns_user_mmap_entry * hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, size_t length, diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index fa15d79eabb3..aa8a08d1c014 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -31,7 +31,6 @@ * SOFTWARE. */ -#include <linux/platform_device.h> #include "hns_roce_device.h" #include "hns_roce_hem.h" #include "hns_roce_common.h" @@ -456,7 +455,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev, * alloc bt space chunk for MTT/CQE. */ size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size; - flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN; + flag = GFP_KERNEL | __GFP_NOWARN; table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT, size, flag); if (!table->hem[index->buf]) { @@ -489,7 +488,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_index *index) { struct ib_device *ibdev = &hr_dev->ib_dev; - int step_idx; + u32 step_idx; int ret = 0; if (index->inited & HEM_INDEX_L0) { @@ -589,8 +588,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev, table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size >> PAGE_SHIFT, table->table_chunk_size, - (table->lowmem ? 
GFP_KERNEL : - GFP_HIGHUSER) | __GFP_NOWARN); + GFP_KERNEL | __GFP_NOWARN); if (!table->hem[i]) { ret = -ENOMEM; goto out; @@ -619,7 +617,7 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev, struct ib_device *ibdev = &hr_dev->ib_dev; u32 hop_num = mhop->hop_num; u32 chunk_ba_num; - int step_idx; + u32 step_idx; index->inited = HEM_INDEX_BUF; chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; @@ -726,9 +724,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, int length; int i, j; - if (!table->lowmem) - return NULL; - mutex_lock(&table->mutex); if (!hns_roce_check_whether_mhop(hr_dev, table->type)) { @@ -784,8 +779,7 @@ out: int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, u32 type, - unsigned long obj_size, unsigned long nobj, - int use_lowmem) + unsigned long obj_size, unsigned long nobj) { unsigned long obj_per_chunk; unsigned long num_hem; @@ -862,7 +856,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, table->type = type; table->num_hem = num_hem; table->obj_size = obj_size; - table->lowmem = use_lowmem; mutex_init(&table->mutex); return 0; @@ -933,7 +926,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, if (table->hem[i]) { if (hr_dev->hw->clear_hem(hr_dev, table, i * table->table_chunk_size / table->obj_size, 0)) - dev_err(dev, "Clear HEM base address failed.\n"); + dev_err(dev, "clear HEM base address failed.\n"); hns_roce_free_hem(hr_dev, table->hem[i]); } @@ -987,7 +980,7 @@ struct hns_roce_hem_head { static struct hns_roce_hem_item * hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count, - bool exist_bt, int bt_level) + bool exist_bt) { struct hns_roce_hem_item *hem; @@ -1196,7 +1189,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, start_aligned = (distance / step) * step + r->offset; end = min_t(int, start_aligned + step - 1, max_ofs); cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, - true, level); + true); if (!cur) { ret = -ENOMEM; goto err_exit; @@ -1248,7 +1241,7 @@ alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num, /* indicate to last region */ r = ®ions[region_cnt - 1]; hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1, - ba_num, true, 0); + ba_num, true); if (!hem) return ERR_PTR(-ENOMEM); @@ -1265,7 +1258,7 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base, struct hns_roce_hem_item *hem; hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1, - r->count, false, 0); + r->count, false); if (!hem) return -ENOMEM; @@ -1422,7 +1415,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, &hem_list->btm_bt); if (ret) { dev_err(hr_dev->dev, - "alloc hem trunk fail ret=%d!\n", ret); + "alloc hem trunk fail ret = %d!\n", ret); goto err_alloc; } } @@ -1431,7 +1424,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions, region_cnt); if (ret) - dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret); + dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret); else return 0; @@ -1469,19 +1462,17 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list) void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list, - int offset, int *mtt_cnt, u64 *phy_addr) + int offset, int *mtt_cnt) { struct list_head *head = &hem_list->btm_bt; struct hns_roce_hem_item *hem, *temp_hem; void *cpu_base = NULL; - u64 phy_base = 0; int nr = 0; 
list_for_each_entry_safe(hem, temp_hem, head, sibling) { if (hem_list_page_is_in_range(hem, offset)) { nr = offset - hem->start; cpu_base = hem->addr + nr * BA_BYTE_LEN; - phy_base = hem->dma_addr + nr * BA_BYTE_LEN; nr = hem->end + 1 - offset; break; } @@ -1490,8 +1481,5 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, if (mtt_cnt) *mtt_cnt = nr; - if (phy_addr) - *phy_addr = phy_base; - return cpu_base; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index 2d84a6b3f05d..7d23d3c51da4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -111,8 +111,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, dma_addr_t *dma_handle); int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, u32 type, - unsigned long obj_size, unsigned long nobj, - int use_lowmem); + unsigned long obj_size, unsigned long nobj); void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table); void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev); @@ -132,7 +131,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list); void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list, - int offset, int *mtt_cnt, u64 *phy_addr); + int offset, int *mtt_cnt); static inline void hns_roce_hem_first(struct hns_roce_hem *hem, struct hns_roce_hem_iter *iter) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c deleted file mode 100644 index f4af3992ba95..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ /dev/null @@ -1,4675 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/platform_device.h> -#include <linux/acpi.h> -#include <linux/etherdevice.h> -#include <linux/interrupt.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <rdma/ib_umem.h> -#include "hns_roce_common.h" -#include "hns_roce_device.h" -#include "hns_roce_cmd.h" -#include "hns_roce_hem.h" -#include "hns_roce_hw_v1.h" - -/** - * hns_get_gid_index - Get gid index. 
- * @hr_dev: pointer to structure hns_roce_dev. - * @port: port, value range: 0 ~ MAX - * @gid_index: gid_index, value range: 0 ~ MAX - * Description: - * N ports shared gids, allocation method as follow: - * GID[0][0], GID[1][0],.....GID[N - 1][0], - * GID[0][0], GID[1][0],.....GID[N - 1][0], - * And so on - */ -u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index) -{ - return gid_index * hr_dev->caps.num_ports + port; -} - -static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg) -{ - dseg->lkey = cpu_to_le32(sg->lkey); - dseg->addr = cpu_to_le64(sg->addr); - dseg->len = cpu_to_le32(sg->length); -} - -static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr, - u32 rkey) -{ - rseg->raddr = cpu_to_le64(remote_addr); - rseg->rkey = cpu_to_le32(rkey); - rseg->len = 0; -} - -static int hns_roce_v1_post_send(struct ib_qp *ibqp, - const struct ib_send_wr *wr, - const struct ib_send_wr **bad_wr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); - struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL; - struct hns_roce_wqe_ctrl_seg *ctrl = NULL; - struct hns_roce_wqe_data_seg *dseg = NULL; - struct hns_roce_qp *qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_sq_db sq_db = {}; - int ps_opcode, i; - unsigned long flags = 0; - void *wqe = NULL; - __le32 doorbell[2]; - const u8 *smac; - int ret = 0; - int loopback; - u32 wqe_idx; - int nreq; - - if (unlikely(ibqp->qp_type != IB_QPT_GSI && - ibqp->qp_type != IB_QPT_RC)) { - dev_err(dev, "un-supported QP type\n"); - *bad_wr = NULL; - return -EOPNOTSUPP; - } - - spin_lock_irqsave(&qp->sq.lock, flags); - - for (nreq = 0; wr; ++nreq, wr = wr->next) { - if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { - ret = -ENOMEM; - *bad_wr = wr; - goto out; - } - - wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); - - if (unlikely(wr->num_sge > qp->sq.max_gs)) { - dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, qp->sq.max_gs); - ret = -EINVAL; - *bad_wr = wr; - goto out; - } - - wqe = hns_roce_get_send_wqe(qp, wqe_idx); - qp->sq.wrid[wqe_idx] = wr->wr_id; - - /* Corresponding to the RC and RD type wqe process separately */ - if (ibqp->qp_type == IB_QPT_GSI) { - ud_sq_wqe = wqe; - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_0_M, - UD_SEND_WQE_U32_4_DMAC_0_S, - ah->av.mac[0]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_1_M, - UD_SEND_WQE_U32_4_DMAC_1_S, - ah->av.mac[1]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_2_M, - UD_SEND_WQE_U32_4_DMAC_2_S, - ah->av.mac[2]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_3_M, - UD_SEND_WQE_U32_4_DMAC_3_S, - ah->av.mac[3]); - - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_DMAC_4_M, - UD_SEND_WQE_U32_8_DMAC_4_S, - ah->av.mac[4]); - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_DMAC_5_M, - UD_SEND_WQE_U32_8_DMAC_5_S, - ah->av.mac[5]); - - smac = (const u8 *)hr_dev->dev_addr[qp->port]; - loopback = ether_addr_equal_unaligned(ah->av.mac, - smac) ? 
1 : 0; - roce_set_bit(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S, - loopback); - - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_OPERATION_TYPE_M, - UD_SEND_WQE_U32_8_OPERATION_TYPE_S, - HNS_ROCE_WQE_OPCODE_SEND); - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M, - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S, - 2); - roce_set_bit(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S, - 1); - - ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ? - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | - (wr->send_flags & IB_SEND_SOLICITED ? - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | - ((wr->opcode == IB_WR_SEND_WITH_IMM) ? - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0); - - roce_set_field(ud_sq_wqe->u32_16, - UD_SEND_WQE_U32_16_DEST_QP_M, - UD_SEND_WQE_U32_16_DEST_QP_S, - ud_wr(wr)->remote_qpn); - roce_set_field(ud_sq_wqe->u32_16, - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M, - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S, - ah->av.stat_rate); - - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_FLOW_LABEL_M, - UD_SEND_WQE_U32_36_FLOW_LABEL_S, - ah->av.flowlabel); - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_PRIORITY_M, - UD_SEND_WQE_U32_36_PRIORITY_S, - ah->av.sl); - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_SGID_INDEX_M, - UD_SEND_WQE_U32_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, qp->phy_port, - ah->av.gid_index)); - - roce_set_field(ud_sq_wqe->u32_40, - UD_SEND_WQE_U32_40_HOP_LIMIT_M, - UD_SEND_WQE_U32_40_HOP_LIMIT_S, - ah->av.hop_limit); - roce_set_field(ud_sq_wqe->u32_40, - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, - ah->av.tclass); - - memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); - - ud_sq_wqe->va0_l = - cpu_to_le32((u32)wr->sg_list[0].addr); - ud_sq_wqe->va0_h = - cpu_to_le32((wr->sg_list[0].addr) >> 32); - ud_sq_wqe->l_key0 = - cpu_to_le32(wr->sg_list[0].lkey); - - ud_sq_wqe->va1_l = - cpu_to_le32((u32)wr->sg_list[1].addr); - ud_sq_wqe->va1_h = - cpu_to_le32((wr->sg_list[1].addr) >> 32); - ud_sq_wqe->l_key1 = - cpu_to_le32(wr->sg_list[1].lkey); - } else if (ibqp->qp_type == IB_QPT_RC) { - u32 tmp_len = 0; - - ctrl = wqe; - memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg)); - for (i = 0; i < wr->num_sge; i++) - tmp_len += wr->sg_list[i].length; - - ctrl->msg_length = - cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len); - - ctrl->sgl_pa_h = 0; - ctrl->flag = 0; - - switch (wr->opcode) { - case IB_WR_SEND_WITH_IMM: - case IB_WR_RDMA_WRITE_WITH_IMM: - ctrl->imm_data = wr->ex.imm_data; - break; - case IB_WR_SEND_WITH_INV: - ctrl->inv_key = - cpu_to_le32(wr->ex.invalidate_rkey); - break; - default: - ctrl->imm_data = 0; - break; - } - - /* Ctrl field, ctrl set type: sig, solic, imm, fence */ - /* SO wait for conforming application scenarios */ - ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ? - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | - (wr->send_flags & IB_SEND_SOLICITED ? - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | - ((wr->opcode == IB_WR_SEND_WITH_IMM || - wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ? - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) | - (wr->send_flags & IB_SEND_FENCE ? 
- (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0); - - wqe += sizeof(struct hns_roce_wqe_ctrl_seg); - - switch (wr->opcode) { - case IB_WR_RDMA_READ: - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, - rdma_wr(wr)->rkey); - break; - case IB_WR_RDMA_WRITE: - case IB_WR_RDMA_WRITE_WITH_IMM: - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, - rdma_wr(wr)->rkey); - break; - case IB_WR_SEND: - case IB_WR_SEND_WITH_INV: - case IB_WR_SEND_WITH_IMM: - ps_opcode = HNS_ROCE_WQE_OPCODE_SEND; - break; - case IB_WR_LOCAL_INV: - case IB_WR_ATOMIC_CMP_AND_SWP: - case IB_WR_ATOMIC_FETCH_AND_ADD: - case IB_WR_LSO: - default: - ps_opcode = HNS_ROCE_WQE_OPCODE_MASK; - break; - } - ctrl->flag |= cpu_to_le32(ps_opcode); - wqe += sizeof(struct hns_roce_wqe_raddr_seg); - - dseg = wqe; - if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { - if (le32_to_cpu(ctrl->msg_length) > - hr_dev->caps.max_sq_inline) { - ret = -EINVAL; - *bad_wr = wr; - dev_err(dev, "inline len(1-%d)=%d, illegal", - le32_to_cpu(ctrl->msg_length), - hr_dev->caps.max_sq_inline); - goto out; - } - for (i = 0; i < wr->num_sge; i++) { - memcpy(wqe, ((void *) (uintptr_t) - wr->sg_list[i].addr), - wr->sg_list[i].length); - wqe += wr->sg_list[i].length; - } - ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE); - } else { - /* sqe num is two */ - for (i = 0; i < wr->num_sge; i++) - set_data_seg(dseg + i, wr->sg_list + i); - - ctrl->flag |= cpu_to_le32(wr->num_sge << - HNS_ROCE_WQE_SGE_NUM_BIT); - } - } - } - -out: - /* Set DB return */ - if (likely(nreq)) { - qp->sq.head += nreq; - - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M, - SQ_DOORBELL_U32_4_SQ_HEAD_S, - (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M, - SQ_DOORBELL_U32_4_SL_S, qp->sl); - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M, - SQ_DOORBELL_U32_4_PORT_S, qp->phy_port); - roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M, - SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); - roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); - - doorbell[0] = sq_db.u32_4; - doorbell[1] = sq_db.u32_8; - - hns_roce_write64_k(doorbell, qp->sq.db_reg); - } - - spin_unlock_irqrestore(&qp->sq.lock, flags); - - return ret; -} - -static int hns_roce_v1_post_recv(struct ib_qp *ibqp, - const struct ib_recv_wr *wr, - const struct ib_recv_wr **bad_wr) -{ - struct hns_roce_rq_wqe_ctrl *ctrl = NULL; - struct hns_roce_wqe_data_seg *scat = NULL; - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_rq_db rq_db = {}; - __le32 doorbell[2] = {0}; - unsigned long flags = 0; - unsigned int wqe_idx; - int ret = 0; - int nreq; - int i; - u32 reg_val; - - spin_lock_irqsave(&hr_qp->rq.lock, flags); - - for (nreq = 0; wr; ++nreq, wr = wr->next) { - if (hns_roce_wq_overflow(&hr_qp->rq, nreq, - hr_qp->ibqp.recv_cq)) { - ret = -ENOMEM; - *bad_wr = wr; - goto out; - } - - wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); - - if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { - dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, hr_qp->rq.max_gs); - ret = -EINVAL; - *bad_wr = wr; - goto out; - } - - ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx); - - roce_set_field(ctrl->rwqe_byte_12, - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M, - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S, - wr->num_sge); - - scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1); - - for (i = 0; i < 
wr->num_sge; i++) - set_data_seg(scat + i, wr->sg_list + i); - - hr_qp->rq.wrid[wqe_idx] = wr->wr_id; - } - -out: - if (likely(nreq)) { - hr_qp->rq.head += nreq; - - if (ibqp->qp_type == IB_QPT_GSI) { - __le32 tmp; - - /* SW update GSI rq header */ - reg_val = roce_read(to_hr_dev(ibqp->device), - ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port); - tmp = cpu_to_le32(reg_val); - roce_set_field(tmp, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, - hr_qp->rq.head); - reg_val = le32_to_cpu(tmp); - roce_write(to_hr_dev(ibqp->device), - ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); - } else { - roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, - 1); - - doorbell[0] = rq_db.u32_4; - doorbell[1] = rq_db.u32_8; - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg); - } - } - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); - - return ret; -} - -static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev, - int sdb_mode, int odb_mode) -{ - __le32 tmp; - u32 val; - - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode); - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); -} - -static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) -{ - spinlock_t *lock = &hr_dev->bt_cmd_lock; - struct device *dev = hr_dev->dev; - struct hns_roce_hem_iter iter; - void __iomem *bt_cmd; - __le32 bt_cmd_val[2]; - __le32 bt_cmd_h = 0; - unsigned long flags; - __le32 bt_cmd_l; - int ret = 0; - u64 bt_ba; - long end; - - /* Find the HEM(Hardware Entry Memory) entry */ - unsigned long i = obj / (table->table_chunk_size / table->obj_size); - - switch (table->type) { - case HEM_TYPE_QPC: - case HEM_TYPE_MTPT: - case HEM_TYPE_CQC: - case HEM_TYPE_SRQC: - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); - break; - default: - return ret; - } - - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - - /* Currently iter only a chunk */ - for (hns_roce_hem_first(table->hem[i], &iter); - !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { - bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT; - - spin_lock_irqsave(lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - end = HW_SYNC_TIMEOUT_MSECS; - while (end > 0) { - if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) - break; - - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); - end -= HW_SYNC_SLEEP_TIME_INTERVAL; - } - - if (end <= 0) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(lock, flags); - return -EBUSY; - } - - bt_cmd_l = cpu_to_le32(bt_ba); - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, - upper_32_bits(bt_ba)); - - bt_cmd_val[0] = bt_cmd_l; - bt_cmd_val[1] = bt_cmd_h; - hns_roce_write64_k(bt_cmd_val, - hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - 
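/*
 * The BT (base address table) update just above follows the same handshake
 * used throughout this file: poll the HW_SYNC bit of ROCEE_BT_CMD_H until
 * the previous command has drained, then post the two-word command through
 * ROCEE_BT_CMD_L (hns_roce_write64_k() issues the L and H words back to
 * back). Note the base address is written page-shifted (bt_ba =
 * dma_addr >> 12), which is why comments elsewhere in this file describe a
 * "44 = 32 + 12" split for the high bits. A minimal sketch of the
 * handshake, factored out (helper name illustrative):
 */
static int hns_roce_v1_post_bt_cmd(struct hns_roce_dev *hr_dev,
				   __le32 cmd_l, __le32 cmd_h)
{
	void __iomem *bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
	__le32 bt_cmd_val[2] = { cmd_l, cmd_h };
	long end = HW_SYNC_TIMEOUT_MSECS;

	/* Wait for hardware to consume the previous BT command. */
	while (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
		if (end <= 0)
			return -EBUSY;
		mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
		end -= HW_SYNC_SLEEP_TIME_INTERVAL;
	}

	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
	return 0;
}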
spin_unlock_irqrestore(lock, flags); - } - - return ret; -} - -static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode, - u32 odb_mode) -{ - __le32 tmp; - u32 val; - - /* Configure SDB/ODB extend mode */ - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode); - roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); -} - -static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept, - u32 sdb_alful) -{ - __le32 tmp; - u32 val; - - /* Configure SDB */ - val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M, - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful); - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M, - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val); -} - -static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept, - u32 odb_alful) -{ - __le32 tmp; - u32 val; - - /* Configure ODB */ - val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M, - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful); - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M, - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val); -} - -static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept, - u32 ext_sdb_alful) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t sdb_dma_addr; - __le32 tmp; - u32 val; - - /* Configure extend SDB threshold */ - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept); - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful); - - /* Configure extend SDB base addr */ - sdb_dma_addr = db->ext_db->sdb_buf_list->map; - roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12)); - - /* Configure extend SDB depth */ - val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S, - db->ext_db->esdb_dep); - /* - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. 
- */ - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val); - - dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep); - dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n", - ext_sdb_alept, ext_sdb_alful); -} - -static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept, - u32 ext_odb_alful) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t odb_dma_addr; - __le32 tmp; - u32 val; - - /* Configure extend ODB threshold */ - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept); - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful); - - /* Configure extend ODB base addr */ - odb_dma_addr = db->ext_db->odb_buf_list->map; - roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12)); - - /* Configure extend ODB depth */ - val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M, - ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S, - db->ext_db->eodb_dep); - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S, - db->ext_db->eodb_dep); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val); - - dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep); - dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n", - ext_odb_alept, ext_odb_alful); -} - -static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod, - u32 odb_ext_mod) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t sdb_dma_addr; - dma_addr_t odb_dma_addr; - int ret = 0; - - db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL); - if (!db->ext_db) - return -ENOMEM; - - if (sdb_ext_mod) { - db->ext_db->sdb_buf_list = kmalloc( - sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL); - if (!db->ext_db->sdb_buf_list) { - ret = -ENOMEM; - goto ext_sdb_buf_fail_out; - } - - db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev, - HNS_ROCE_V1_EXT_SDB_SIZE, - &sdb_dma_addr, GFP_KERNEL); - if (!db->ext_db->sdb_buf_list->buf) { - ret = -ENOMEM; - goto alloc_sq_db_buf_fail; - } - db->ext_db->sdb_buf_list->map = sdb_dma_addr; - - db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH); - hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT, - HNS_ROCE_V1_EXT_SDB_ALFUL); - } else - hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT, - HNS_ROCE_V1_SDB_ALFUL); - - if (odb_ext_mod) { - db->ext_db->odb_buf_list = kmalloc( - sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL); - if (!db->ext_db->odb_buf_list) { - ret = -ENOMEM; - goto ext_odb_buf_fail_out; - } - - db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev, - HNS_ROCE_V1_EXT_ODB_SIZE, - &odb_dma_addr, GFP_KERNEL); - if (!db->ext_db->odb_buf_list->buf) { - ret = -ENOMEM; - goto alloc_otr_db_buf_fail; - } - db->ext_db->odb_buf_list->map = odb_dma_addr; - - db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH); - hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT, - HNS_ROCE_V1_EXT_ODB_ALFUL); - } else - hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT, - HNS_ROCE_V1_ODB_ALFUL); - - hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod); - - return 0; - -alloc_otr_db_buf_fail: - kfree(db->ext_db->odb_buf_list); - -ext_odb_buf_fail_out: - if 
(sdb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, - db->ext_db->sdb_buf_list->buf, - db->ext_db->sdb_buf_list->map); - } - -alloc_sq_db_buf_fail: - if (sdb_ext_mod) - kfree(db->ext_db->sdb_buf_list); - -ext_sdb_buf_fail_out: - kfree(db->ext_db); - return ret; -} - -static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev, - struct ib_pd *pd) -{ - struct device *dev = &hr_dev->pdev->dev; - struct ib_qp_init_attr init_attr; - struct ib_qp *qp; - - memset(&init_attr, 0, sizeof(struct ib_qp_init_attr)); - init_attr.qp_type = IB_QPT_RC; - init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; - init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM; - init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM; - - qp = ib_create_qp(pd, &init_attr); - if (IS_ERR(qp)) { - dev_err(dev, "Create loop qp for mr free failed!"); - return NULL; - } - - return to_hr_qp(qp); -} - -static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_caps *caps = &hr_dev->caps; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct device *dev = &hr_dev->pdev->dev; - struct ib_cq_init_attr cq_init_attr; - struct ib_qp_attr attr = { 0 }; - struct hns_roce_qp *hr_qp; - struct ib_cq *cq; - struct ib_pd *pd; - union ib_gid dgid; - __be64 subnet_prefix; - int attr_mask = 0; - int ret; - int i, j; - u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; - u8 phy_port; - u32 port = 0; - u8 sl; - - /* Reserved cq for loop qp */ - cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2; - cq_init_attr.comp_vector = 0; - - cq = rdma_zalloc_drv_obj(ibdev, ib_cq); - if (!cq) - return -ENOMEM; - - ret = hns_roce_create_cq(cq, &cq_init_attr, NULL); - if (ret) { - dev_err(dev, "Create cq for reserved loop qp failed!"); - goto alloc_cq_failed; - } - free_mr->mr_free_cq = to_hr_cq(cq); - free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev; - free_mr->mr_free_cq->ib_cq.uobject = NULL; - free_mr->mr_free_cq->ib_cq.comp_handler = NULL; - free_mr->mr_free_cq->ib_cq.event_handler = NULL; - free_mr->mr_free_cq->ib_cq.cq_context = NULL; - atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); - - pd = rdma_zalloc_drv_obj(ibdev, ib_pd); - if (!pd) { - ret = -ENOMEM; - goto alloc_mem_failed; - } - - pd->device = ibdev; - ret = hns_roce_alloc_pd(pd, NULL); - if (ret) - goto alloc_pd_failed; - - free_mr->mr_free_pd = to_hr_pd(pd); - free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; - free_mr->mr_free_pd->ibpd.uobject = NULL; - free_mr->mr_free_pd->ibpd.__internal_mr = NULL; - atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0); - - attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE; - attr.pkey_index = 0; - attr.min_rnr_timer = 0; - /* Disable read ability */ - attr.max_dest_rd_atomic = 0; - attr.max_rd_atomic = 0; - /* Use arbitrary values as rq_psn and sq_psn */ - attr.rq_psn = 0x0808; - attr.sq_psn = 0x0808; - attr.retry_cnt = 7; - attr.rnr_retry = 7; - attr.timeout = 0x12; - attr.path_mtu = IB_MTU_256; - attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); - rdma_ah_set_static_rate(&attr.ah_attr, 3); - - subnet_prefix = cpu_to_be64(0xfe80000000000000LL); - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - phy_port = (i >= HNS_ROCE_MAX_PORTS) ? 
(i - 2) : - (i % HNS_ROCE_MAX_PORTS); - sl = i / HNS_ROCE_MAX_PORTS; - - for (j = 0; j < caps->num_ports; j++) { - if (hr_dev->iboe.phy_port[j] == phy_port) { - queue_en[i] = 1; - port = j; - break; - } - } - - if (!queue_en[i]) - continue; - - free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); - if (!free_mr->mr_free_qp[i]) { - dev_err(dev, "Create loop qp failed!\n"); - ret = -ENOMEM; - goto create_lp_qp_failed; - } - hr_qp = free_mr->mr_free_qp[i]; - - hr_qp->port = port; - hr_qp->phy_port = phy_port; - hr_qp->ibqp.qp_type = IB_QPT_RC; - hr_qp->ibqp.device = &hr_dev->ib_dev; - hr_qp->ibqp.uobject = NULL; - atomic_set(&hr_qp->ibqp.usecnt, 0); - hr_qp->ibqp.pd = pd; - hr_qp->ibqp.recv_cq = cq; - hr_qp->ibqp.send_cq = cq; - - rdma_ah_set_port_num(&attr.ah_attr, port + 1); - rdma_ah_set_sl(&attr.ah_attr, sl); - attr.port_num = port + 1; - - attr.dest_qp_num = hr_qp->qpn; - memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), - hr_dev->dev_addr[port], - ETH_ALEN); - - memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); - memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); - memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3); - dgid.raw[11] = 0xff; - dgid.raw[12] = 0xfe; - dgid.raw[8] ^= 2; - rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw); - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, - IB_QPS_RESET, IB_QPS_INIT); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN, - IB_QPS_INIT, IB_QPS_RTR); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, - IB_QPS_RTR, IB_QPS_RTS); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - } - - return 0; - -create_lp_qp_failed: - for (i -= 1; i >= 0; i--) { - hr_qp = free_mr->mr_free_qp[i]; - if (ib_destroy_qp(&hr_qp->ibqp)) - dev_err(dev, "Destroy qp %d for mr free failed!\n", i); - } - - hns_roce_dealloc_pd(pd, NULL); - -alloc_pd_failed: - kfree(pd); - -alloc_mem_failed: - hns_roce_destroy_cq(cq, NULL); -alloc_cq_failed: - kfree(cq); - return ret; -} - -static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp *hr_qp; - int ret; - int i; - - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - hr_qp = free_mr->mr_free_qp[i]; - if (!hr_qp) - continue; - - ret = ib_destroy_qp(&hr_qp->ibqp); - if (ret) - dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", - i, ret); - } - - hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL); - kfree(&free_mr->mr_free_cq->ib_cq); - hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); - kfree(&free_mr->mr_free_pd->ibpd); -} - -static int hns_roce_db_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - u32 sdb_ext_mod; - u32 odb_ext_mod; - u32 sdb_evt_mod; - u32 odb_evt_mod; - int ret; - - memset(db, 0, sizeof(*db)); - - /* Default DB mode */ - sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE; - odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE; - sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE; - odb_evt_mod = HNS_ROCE_ODB_POLL_MODE; - - db->sdb_ext_mod = sdb_ext_mod; - db->odb_ext_mod = odb_ext_mod; - - /* Init extend DB */ - ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod); - 
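/*
 * The dgid built for the reserved loopback QPs above is the standard
 * MAC-to-EUI-64 link-local mapping: an fe80::/64 prefix, the MAC split
 * around the ff:fe filler, and the universal/local bit of the first MAC
 * byte flipped. A minimal sketch of that construction (helper name
 * illustrative):
 */
static void hns_roce_v1_mac_to_ll_gid(union ib_gid *gid, const u8 *mac)
{
	__be64 prefix = cpu_to_be64(0xfe80000000000000ULL);

	memcpy(&gid->raw[0], &prefix, sizeof(u64));	/* fe80::/64 */
	memcpy(&gid->raw[8], mac, 3);			/* OUI half */
	gid->raw[11] = 0xff;				/* EUI-64 filler */
	gid->raw[12] = 0xfe;
	memcpy(&gid->raw[13], mac + 3, 3);		/* device half */
	gid->raw[8] ^= 2;				/* toggle U/L bit */
}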
if (ret) { - dev_err(dev, "Failed in extend DB configuration.\n"); - return ret; - } - - hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod); - - return 0; -} - -static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work) -{ - struct hns_roce_recreate_lp_qp_work *lp_qp_work; - struct hns_roce_dev *hr_dev; - - lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work, - work); - hr_dev = to_hr_dev(lp_qp_work->ib_dev); - - hns_roce_v1_release_lp_qp(hr_dev); - - if (hns_roce_v1_rsv_lp_qp(hr_dev)) - dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n"); - - if (lp_qp_work->comp_flag) - complete(lp_qp_work->comp); - - kfree(lp_qp_work); -} - -static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) -{ - long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_recreate_lp_qp_work *lp_qp_work; - struct device *dev = &hr_dev->pdev->dev; - struct completion comp; - - lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work), - GFP_KERNEL); - if (!lp_qp_work) - return -ENOMEM; - - INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn); - - lp_qp_work->ib_dev = &(hr_dev->ib_dev); - lp_qp_work->comp = &comp; - lp_qp_work->comp_flag = 1; - - init_completion(lp_qp_work->comp); - - queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); - - while (end > 0) { - if (try_wait_for_completion(&comp)) - return 0; - msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); - end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE; - } - - lp_qp_work->comp_flag = 0; - if (try_wait_for_completion(&comp)) - return 0; - - dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n"); - return -ETIMEDOUT; -} - -static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); - struct device *dev = &hr_dev->pdev->dev; - struct ib_send_wr send_wr; - const struct ib_send_wr *bad_wr; - int ret; - - memset(&send_wr, 0, sizeof(send_wr)); - send_wr.next = NULL; - send_wr.num_sge = 0; - send_wr.send_flags = 0; - send_wr.sg_list = NULL; - send_wr.wr_id = (unsigned long long)&send_wr; - send_wr.opcode = IB_WR_RDMA_WRITE; - - ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr); - if (ret) { - dev_err(dev, "Post write wqe for mr free failed(%d)!", ret); - return ret; - } - - return 0; -} - -static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) -{ - unsigned long end = - msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; - struct hns_roce_mr_free_work *mr_work = - container_of(work, struct hns_roce_mr_free_work, work); - struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev); - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq; - struct hns_roce_mr *hr_mr = mr_work->mr; - struct device *dev = &hr_dev->pdev->dev; - struct ib_wc wc[HNS_ROCE_V1_RESV_QP]; - struct hns_roce_qp *hr_qp; - int ne = 0; - int ret; - int i; - - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - hr_qp = free_mr->mr_free_qp[i]; - if (!hr_qp) - continue; - ne++; - - ret = hns_roce_v1_send_lp_wqe(hr_qp); - if (ret) { - dev_err(dev, - "Send wqe (qp:0x%lx) for mr free failed(%d)!\n", - hr_qp->qpn, ret); - goto free_work; - } - } - - if (!ne) { - dev_err(dev, "Reserved loop qp is absent!\n"); - goto free_work; - } - - do { - ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); - if (ret < 0 && hr_qp) { - 
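/*
 * Both the LP-QP recreate path above and this MR-free path wait on their
 * work item with the same idiom: queue the work with comp_flag = 1, poll
 * try_wait_for_completion() with a sleep between probes, and on timeout
 * clear comp_flag so the work function will not complete() a stack
 * completion that is about to go out of scope (one final
 * try_wait_for_completion() closes the race window). A condensed sketch of
 * the idiom (helper name illustrative):
 */
static int hns_roce_v1_wait_lp_work(struct completion *comp, int *comp_flag,
				    long timeout, long step)
{
	while (timeout > 0) {
		if (try_wait_for_completion(comp))
			return 0;
		msleep(step);
		timeout -= step;
	}

	*comp_flag = 0;	/* work fn must skip complete() from now on */
	return try_wait_for_completion(comp) ? 0 : -ETIMEDOUT;
}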
dev_err(dev, - "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n", - hr_qp->qpn, ret, hr_mr->key, ne); - goto free_work; - } - ne -= ret; - usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000, - (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000); - } while (ne && time_before_eq(jiffies, end)); - - if (ne != 0) - dev_err(dev, - "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n", - hr_mr->key, ne); - -free_work: - if (mr_work->comp_flag) - complete(mr_work->comp); - kfree(mr_work); -} - -static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, - struct hns_roce_mr *mr, struct ib_udata *udata) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_mr_free_work *mr_work; - unsigned long start = jiffies; - struct completion comp; - int ret = 0; - - if (mr->enabled) { - if (hns_roce_hw_destroy_mpt(hr_dev, NULL, - key_to_hw_index(mr->key) & - (hr_dev->caps.num_mtpts - 1))) - dev_warn(dev, "DESTROY_MPT failed!\n"); - } - - mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL); - if (!mr_work) { - ret = -ENOMEM; - goto free_mr; - } - - INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn); - - mr_work->ib_dev = &(hr_dev->ib_dev); - mr_work->comp = &comp; - mr_work->comp_flag = 1; - mr_work->mr = (void *)mr; - init_completion(mr_work->comp); - - queue_work(free_mr->free_mr_wq, &(mr_work->work)); - - while (end > 0) { - if (try_wait_for_completion(&comp)) - goto free_mr; - msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); - end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE; - } - - mr_work->comp_flag = 0; - if (try_wait_for_completion(&comp)) - goto free_mr; - - dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key); - ret = -ETIMEDOUT; - -free_mr: - dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n", - mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); - - ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key)); - hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); - kfree(mr); - - return ret; -} - -static void hns_roce_db_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - - if (db->sdb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, - db->ext_db->sdb_buf_list->buf, - db->ext_db->sdb_buf_list->map); - kfree(db->ext_db->sdb_buf_list); - } - - if (db->odb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE, - db->ext_db->odb_buf_list->buf, - db->ext_db->odb_buf_list->map); - kfree(db->ext_db->odb_buf_list); - } - - kfree(db->ext_db); -} - -static int hns_roce_raq_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_raq_table *raq = &priv->raq_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t addr; - int raq_shift; - __le32 tmp; - u32 val; - int ret; - - raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL); - if (!raq->e_raq_buf) - return -ENOMEM; - - raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, - &addr, GFP_KERNEL); - if (!raq->e_raq_buf->buf) { - ret = -ENOMEM; - goto err_dma_alloc_raq; - } - raq->e_raq_buf->map = addr; - - /* Configure raq extended address. 
48bit 4K align */ - roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12); - - /* Configure raq_shift */ - raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY); - val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M, - ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift); - /* - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M, - ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S, - raq->e_raq_buf->map >> 44); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val); - dev_dbg(dev, "Configure raq_shift 0x%x.\n", val); - - /* Configure raq threshold */ - val = roce_read(hr_dev, ROCEE_RAQ_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M, - ROCEE_RAQ_WL_ROCEE_RAQ_WL_S, - HNS_ROCE_V1_EXT_RAQ_WF); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_RAQ_WL_REG, val); - dev_dbg(dev, "Configure raq_wl 0x%x.\n", val); - - /* Enable extend raq */ - val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S, - POL_TIME_INTERVAL_VAL); - roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1); - roce_set_field(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S, - 2); - roce_set_bit(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val); - dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val); - - /* Enable raq drop */ - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val); - - return 0; - -err_dma_alloc_raq: - kfree(raq->e_raq_buf); - return ret; -} - -static void hns_roce_raq_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_raq_table *raq = &priv->raq_table; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf, - raq->e_raq_buf->map); - kfree(raq->e_raq_buf); -} - -static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) -{ - __le32 tmp; - u32 val; - - if (enable_flag) { - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - /* Open all ports */ - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, - ALL_PORT_VAL_OPEN); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - } else { - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - /* Close all ports */ - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - } -} - -static int hns_roce_bt_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, - GFP_KERNEL); - if 
(!priv->bt_table.qpc_buf.buf) - return -ENOMEM; - - priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map, - GFP_KERNEL); - if (!priv->bt_table.mtpt_buf.buf) { - ret = -ENOMEM; - goto err_failed_alloc_mtpt_buf; - } - - priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, - GFP_KERNEL); - if (!priv->bt_table.cqc_buf.buf) { - ret = -ENOMEM; - goto err_failed_alloc_cqc_buf; - } - - return 0; - -err_failed_alloc_cqc_buf: - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); - -err_failed_alloc_mtpt_buf: - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); - - return ret; -} - -static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); -} - -static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct device *dev = &hr_dev->pdev->dev; - - /* - * This buffer will be used for CQ's tptr(tail pointer), also - * named ci(customer index). Every CQ will use 2 bytes to save - * cqe ci in hip06. Hardware will read this area to get new ci - * when the queue is almost full. - */ - tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, - &tptr_buf->map, GFP_KERNEL); - if (!tptr_buf->buf) - return -ENOMEM; - - hr_dev->tptr_dma_addr = tptr_buf->map; - hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE; - - return 0; -} - -static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, - tptr_buf->buf, tptr_buf->map); -} - -static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr"); - if (!free_mr->free_mr_wq) { - dev_err(dev, "Create free mr workqueue failed!\n"); - return -ENOMEM; - } - - ret = hns_roce_v1_rsv_lp_qp(hr_dev); - if (ret) { - dev_err(dev, "Reserved loop qp failed(%d)!\n", ret); - destroy_workqueue(free_mr->free_mr_wq); - } - - return ret; -} - -static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - - destroy_workqueue(free_mr->free_mr_wq); - - hns_roce_v1_release_lp_qp(hr_dev); -} - -/** - * hns_roce_v1_reset - reset RoCE - * @hr_dev: RoCE device struct pointer - * @dereset: true -- drop reset, false -- reset - * return 0 - success , negative --fail - */ -static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) -{ - struct device_node *dsaf_node; - struct device *dev = &hr_dev->pdev->dev; - struct device_node *np = dev->of_node; - struct fwnode_handle 
*fwnode; - int ret; - - /* check if this is DT/ACPI case */ - if (dev_of_node(dev)) { - dsaf_node = of_parse_phandle(np, "dsaf-handle", 0); - if (!dsaf_node) { - dev_err(dev, "could not find dsaf-handle\n"); - return -EINVAL; - } - fwnode = &dsaf_node->fwnode; - } else if (is_acpi_device_node(dev->fwnode)) { - struct fwnode_reference_args args; - - ret = acpi_node_get_property_reference(dev->fwnode, - "dsaf-handle", 0, &args); - if (ret) { - dev_err(dev, "could not find dsaf-handle\n"); - return ret; - } - fwnode = args.fwnode; - } else { - dev_err(dev, "cannot read data from DT or ACPI\n"); - return -ENXIO; - } - - ret = hns_dsaf_roce_reset(fwnode, false); - if (ret) - return ret; - - if (dereset) { - msleep(SLEEP_TIME_INTERVAL); - ret = hns_dsaf_roce_reset(fwnode, true); - } - - return ret; -} - -static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_caps *caps = &hr_dev->caps; - int i; - - hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG); - hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG); - hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) | - ((u64)roce_read(hr_dev, - ROCEE_SYS_IMAGE_GUID_H_REG) << 32); - hr_dev->hw_rev = HNS_ROCE_HW_VER1; - - caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM; - caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM; - caps->min_wqes = HNS_ROCE_MIN_WQE_NUM; - caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM; - caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; - caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM; - caps->max_sq_sg = HNS_ROCE_V1_SG_NUM; - caps->max_rq_sg = HNS_ROCE_V1_SG_NUM; - caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE; - caps->num_uars = HNS_ROCE_V1_UAR_NUM; - caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM; - caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM; - caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM; - caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM; - caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM; - caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS; - caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM; - caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA; - caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA; - caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ; - caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ; - caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE; - caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE; - caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE; - caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE; - caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; - caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE; - caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; - caps->reserved_lkey = 0; - caps->reserved_pds = 0; - caps->reserved_mrws = 1; - caps->reserved_uars = 0; - caps->reserved_cqs = 0; - caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */ - caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE; - - for (i = 0; i < caps->num_ports; i++) - caps->pkey_table_len[i] = 1; - - for (i = 0; i < caps->num_ports; i++) { - /* Six ports shared 16 GID in v1 engine */ - if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports)) - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / - caps->num_ports; - else - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / - caps->num_ports + 1; - } - - caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM; - caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM; - caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG); - caps->max_mtu = IB_MTU_2048; - - return 0; -} - -static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) -{ - int ret; - u32 val; - __le32 tmp; - struct device *dev = &hr_dev->pdev->dev; - - /* 
DMAE user config */ - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M, - ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M, - ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S, - 1 << PAGES_SHIFT_16); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val); - - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M, - ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M, - ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S, - 1 << PAGES_SHIFT_16); - - ret = hns_roce_db_init(hr_dev); - if (ret) { - dev_err(dev, "doorbell init failed!\n"); - return ret; - } - - ret = hns_roce_raq_init(hr_dev); - if (ret) { - dev_err(dev, "raq init failed!\n"); - goto error_failed_raq_init; - } - - ret = hns_roce_bt_init(hr_dev); - if (ret) { - dev_err(dev, "bt init failed!\n"); - goto error_failed_bt_init; - } - - ret = hns_roce_tptr_init(hr_dev); - if (ret) { - dev_err(dev, "tptr init failed!\n"); - goto error_failed_tptr_init; - } - - ret = hns_roce_free_mr_init(hr_dev); - if (ret) { - dev_err(dev, "free mr init failed!\n"); - goto error_failed_free_mr_init; - } - - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP); - - return 0; - -error_failed_free_mr_init: - hns_roce_tptr_free(hr_dev); - -error_failed_tptr_init: - hns_roce_bt_free(hr_dev); - -error_failed_bt_init: - hns_roce_raq_free(hr_dev); - -error_failed_raq_init: - hns_roce_db_free(hr_dev); - return ret; -} - -static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) -{ - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); - hns_roce_free_mr_free(hr_dev); - hns_roce_tptr_free(hr_dev); - hns_roce_bt_free(hr_dev); - hns_roce_raq_free(hr_dev); - hns_roce_db_free(hr_dev); -} - -static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev) -{ - u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG); - - return (!!(status & (1 << HCR_GO_BIT))); -} - -static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, - u16 op, u16 token, int event) -{ - u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG); - unsigned long end; - u32 val = 0; - __le32 tmp; - - end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies; - while (hns_roce_v1_cmd_pending(hr_dev)) { - if (time_after(jiffies, end)) { - dev_err(hr_dev->dev, "jiffies=%d end=%d\n", - (int)jiffies, (int)end); - return -EAGAIN; - } - cond_resched(); - } - - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S, - op); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M, - ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier); - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event); - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M, - ROCEE_MB6_ROCEE_MB_TOKEN_S, token); - - val = le32_to_cpu(tmp); - writeq(in_param, hcr + 0); - writeq(out_param, hcr + 2); - writel(in_modifier, hcr + 4); - /* Memory barrier */ - wmb(); - - writel(val, hcr + 5); - - return 0; -} - -static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, - unsigned int timeout) -{ - u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG; - unsigned long end; - u32 status = 0; - - end = msecs_to_jiffies(timeout) + jiffies; - while (hns_roce_v1_cmd_pending(hr_dev) && 
time_before(jiffies, end)) - cond_resched(); - - if (hns_roce_v1_cmd_pending(hr_dev)) { - dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n"); - return -ETIMEDOUT; - } - - status = le32_to_cpu((__force __le32) - __raw_readl(hcr + HCR_STATUS_OFFSET)); - if ((status & STATUS_MASK) != 0x1) { - dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status); - return -EBUSY; - } - - return 0; -} - -static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port, - int gid_index, const union ib_gid *gid, - const struct ib_gid_attr *attr) -{ - unsigned long flags; - u32 *p = NULL; - u8 gid_idx; - - gid_idx = hns_get_gid_index(hr_dev, port, gid_index); - - spin_lock_irqsave(&hr_dev->iboe.lock, flags); - - p = (u32 *)&gid->raw[0]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[4]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[8]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[0xc]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); - - return 0; -} - -static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, - const u8 *addr) -{ - u32 reg_smac_l; - u16 reg_smac_h; - __le32 tmp; - u16 *p_h; - u32 *p; - u32 val; - - /* - * When mac changed, loopback may fail - * because of smac not equal to dmac. - * We Need to release and create reserved qp again. - */ - if (hr_dev->hw->dereg_mr) { - int ret; - - ret = hns_roce_v1_recreate_lp_qp(hr_dev); - if (ret && ret != -ETIMEDOUT) - return ret; - } - - p = (u32 *)(&addr[0]); - reg_smac_l = *p; - roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG + - PHY_PORT_OFFSET * phy_port); - - val = roce_read(hr_dev, - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); - tmp = cpu_to_le32(val); - p_h = (u16 *)(&addr[4]); - reg_smac_h = *p_h; - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M, - ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, - val); - - return 0; -} - -static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, - enum ib_mtu mtu) -{ - __le32 tmp; - u32 val; - - val = roce_read(hr_dev, - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M, - ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, - val); -} - -static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, - struct hns_roce_mr *mr, - unsigned long mtpt_idx) -{ - u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 }; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_v1_mpt_entry *mpt_entry; - dma_addr_t pbl_ba; - int count; - int i; - - /* MPT filled into mailbox buf */ - mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf; - memset(mpt_entry, 0, sizeof(*mpt_entry)); - - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M, - MPT_BYTE_4_KEY_STATE_S, KEY_VALID); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M, - MPT_BYTE_4_KEY_S, mr->key); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M, - MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0); - 
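/*
 * hns_roce_v1_post_mbox() and hns_roce_v1_chk_mbox() above form a classic
 * go-bit mailbox: wait for HCR_GO_BIT to clear, write the arguments plus
 * the command word, then poll until hardware drops the bit again and
 * verify the status byte (0x1 means success). A condensed sketch of a
 * fully polled command built on those two primitives (helper name
 * illustrative; CMD_POLL_TOKEN is the token the generic cmd layer uses for
 * polled mailboxes):
 */
static int hns_roce_v1_cmd_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				u64 out_param, u32 in_modifier,
				u8 op_modifier, u16 op)
{
	int ret;

	ret = hns_roce_v1_post_mbox(hr_dev, in_param, out_param, in_modifier,
				    op_modifier, op, CMD_POLL_TOKEN, 0);
	if (ret)
		return ret;

	return hns_roce_v1_chk_mbox(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS);
}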
roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S, - (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M, - MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S, - (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S, - (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S, - (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S, - 0); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0); - - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, - MPT_BYTE_12_PBL_ADDR_H_S, 0); - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M, - MPT_BYTE_12_MW_BIND_COUNTER_S, 0); - - mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova); - mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32)); - mpt_entry->length = cpu_to_le32((u32)mr->size); - - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M, - MPT_BYTE_28_PD_S, mr->pd); - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M, - MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx); - roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, - MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); - - /* DMA memory register */ - if (mr->type == MR_TYPE_DMA) - return 0; - - count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - ARRAY_SIZE(pages), &pbl_ba); - if (count < 1) { - ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count); - return -ENOBUFS; - } - - /* Register user mr */ - for (i = 0; i < count; i++) { - switch (i) { - case 0: - mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_36, - MPT_BYTE_36_PA0_H_M, - MPT_BYTE_36_PA0_H_S, - (u32)(pages[i] >> PAGES_SHIFT_32)); - break; - case 1: - roce_set_field(mpt_entry->mpt_byte_36, - MPT_BYTE_36_PA1_L_M, - MPT_BYTE_36_PA1_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_40, - MPT_BYTE_40_PA1_H_M, - MPT_BYTE_40_PA1_H_S, - (u32)(pages[i] >> PAGES_SHIFT_24)); - break; - case 2: - roce_set_field(mpt_entry->mpt_byte_40, - MPT_BYTE_40_PA2_L_M, - MPT_BYTE_40_PA2_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_44, - MPT_BYTE_44_PA2_H_M, - MPT_BYTE_44_PA2_H_S, - (u32)(pages[i] >> PAGES_SHIFT_16)); - break; - case 3: - roce_set_field(mpt_entry->mpt_byte_44, - MPT_BYTE_44_PA3_L_M, - MPT_BYTE_44_PA3_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_48, - MPT_BYTE_48_PA3_H_M, - MPT_BYTE_48_PA3_H_S, - (u32)(pages[i] >> PAGES_SHIFT_8)); - break; - case 4: - mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_56, - MPT_BYTE_56_PA4_H_M, - MPT_BYTE_56_PA4_H_S, - (u32)(pages[i] >> PAGES_SHIFT_32)); - break; - case 5: - roce_set_field(mpt_entry->mpt_byte_56, - MPT_BYTE_56_PA5_L_M, - MPT_BYTE_56_PA5_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_60, - MPT_BYTE_60_PA5_H_M, - MPT_BYTE_60_PA5_H_S, - (u32)(pages[i] >> PAGES_SHIFT_24)); - break; - case 6: - roce_set_field(mpt_entry->mpt_byte_60, - MPT_BYTE_60_PA6_L_M, - MPT_BYTE_60_PA6_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_64, - MPT_BYTE_64_PA6_H_M, - MPT_BYTE_64_PA6_H_S, - (u32)(pages[i] >> PAGES_SHIFT_16)); - break; - default: - 
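/*
 * Each inline PA above lands in a differently-aligned pair of MPT fields:
 * the low bits go into one mpt_byte_* word and the remainder into the
 * next, with the split point (32/24/16/8 bits) depending on the slot. A
 * sketch of one slot, assuming the MPT_BYTE_* field macros above (slot 1
 * splits at 24 bits; helper name illustrative):
 */
static void mpt_set_pa1(struct hns_roce_v1_mpt_entry *mpt_entry, u64 pa)
{
	/* low 24 bits into mpt_byte_36, the rest into mpt_byte_40 */
	roce_set_field(mpt_entry->mpt_byte_36, MPT_BYTE_36_PA1_L_M,
		       MPT_BYTE_36_PA1_L_S, (u32)pa);
	roce_set_field(mpt_entry->mpt_byte_40, MPT_BYTE_40_PA1_H_M,
		       MPT_BYTE_40_PA1_H_S, (u32)(pa >> PAGES_SHIFT_24));
}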
break; - } - } - - mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba); - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, - MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba)); - - return 0; -} - -static void *get_cqe(struct hns_roce_cq *hr_cq, int n) -{ - return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE); -} - -static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) -{ - struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe); - - /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */ - return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^ - !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL; -} - -static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) -{ - return get_sw_cqe(hr_cq, hr_cq->cons_index); -} - -static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) -{ - __le32 doorbell[2]; - - doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1)); - doorbell[1] = 0; - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn); - - hns_roce_write64_k(doorbell, hr_cq->db_reg); -} - -static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, - struct hns_roce_srq *srq) -{ - struct hns_roce_cqe *cqe, *dest; - u32 prod_index; - int nfreed = 0; - u8 owner_bit; - - for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index); - ++prod_index) { - if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) - break; - } - - /* - * Now backwards through the CQ, removing CQ entries - * that match our QP by overwriting them with next entries. - */ - while ((int) --prod_index - (int) hr_cq->cons_index >= 0) { - cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe); - if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) & - HNS_ROCE_CQE_QPN_MASK) == qpn) { - /* In v1 engine, not support SRQ */ - ++nfreed; - } else if (nfreed) { - dest = get_cqe(hr_cq, (prod_index + nfreed) & - hr_cq->ib_cq.cqe); - owner_bit = roce_get_bit(dest->cqe_byte_4, - CQE_BYTE_4_OWNER_S); - memcpy(dest, cqe, sizeof(*cqe)); - roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S, - owner_bit); - } - } - - if (nfreed) { - hr_cq->cons_index += nfreed; - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); - } -} - -static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, - struct hns_roce_srq *srq) -{ - spin_lock_irq(&hr_cq->lock); - __hns_roce_v1_cq_clean(hr_cq, qpn, srq); - spin_unlock_irq(&hr_cq->lock); -} - -static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev, - struct hns_roce_cq *hr_cq, void *mb_buf, - u64 *mtts, dma_addr_t dma_handle) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct hns_roce_cq_context *cq_context = mb_buf; - dma_addr_t tptr_dma_addr; - int offset; - - memset(cq_context, 0, sizeof(*cq_context)); - - /* Get the tptr for this CQ. 
*/ - offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE; - tptr_dma_addr = tptr_buf->map + offset; - hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset); - - /* Register cq_context members */ - roce_set_field(cq_context->cqc_byte_4, - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M, - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID); - roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M, - CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn); - - cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle); - - roce_set_field(cq_context->cqc_byte_12, - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M, - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S, - ((u64)dma_handle >> 32)); - roce_set_field(cq_context->cqc_byte_12, - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M, - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S, - ilog2(hr_cq->cq_depth)); - roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M, - CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector); - - cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0])); - - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M, - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32); - /* Dedicated hardware, directly set 0 */ - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M, - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0); - /** - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M, - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S, - tptr_dma_addr >> 44); - - cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12)); - - roce_set_field(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M, - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S, - 0); - /* The initial value of cq's ci is 0 */ - roce_set_field(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M, - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0); -} - -static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, - enum ib_cq_notify_flags flags) -{ - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - u32 notification_flag; - __le32 doorbell[2] = {}; - - notification_flag = (flags & IB_CQ_SOLICITED_MASK) == - IB_CQ_SOLICITED ? 
CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; - /* - * flags = 0; Notification Flag = 1, next - * flags = 1; Notification Flag = 0, solocited - */ - doorbell[0] = - cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1)); - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, - hr_cq->cqn | notification_flag); - - hns_roce_write64_k(doorbell, hr_cq->db_reg); - - return 0; -} - -static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, - struct hns_roce_qp **cur_qp, struct ib_wc *wc) -{ - int qpn; - int is_send; - u16 wqe_ctr; - u32 status; - u32 opcode; - struct hns_roce_cqe *cqe; - struct hns_roce_qp *hr_qp; - struct hns_roce_wq *wq; - struct hns_roce_wqe_ctrl_seg *sq_wqe; - struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); - struct device *dev = &hr_dev->pdev->dev; - - /* Find cqe according consumer index */ - cqe = next_cqe_sw(hr_cq); - if (!cqe) - return -EAGAIN; - - ++hr_cq->cons_index; - /* Memory barrier */ - rmb(); - /* 0->SQ, 1->RQ */ - is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S)); - - /* Local_qpn in UD cqe is always 1, so it needs to compute new qpn */ - if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) <= 1) { - qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M, - CQE_BYTE_20_PORT_NUM_S) + - roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) * - HNS_ROCE_MAX_PORTS; - } else { - qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S); - } - - if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) { - hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); - if (unlikely(!hr_qp)) { - dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n", - hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK)); - return -EINVAL; - } - - *cur_qp = hr_qp; - } - - wc->qp = &(*cur_qp)->ibqp; - wc->vendor_err = 0; - - status = roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_STATUS_OF_THE_OPERATION_M, - CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) & - HNS_ROCE_CQE_STATUS_MASK; - switch (status) { - case HNS_ROCE_CQE_SUCCESS: - wc->status = IB_WC_SUCCESS; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR: - wc->status = IB_WC_LOC_LEN_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR: - wc->status = IB_WC_LOC_QP_OP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR: - wc->status = IB_WC_LOC_PROT_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR: - wc->status = IB_WC_WR_FLUSH_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR: - wc->status = IB_WC_MW_BIND_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR: - wc->status = IB_WC_BAD_RESP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR: - wc->status = IB_WC_LOC_ACCESS_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: - wc->status = IB_WC_REM_INV_REQ_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR: - wc->status = IB_WC_REM_ACCESS_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR: - wc->status = IB_WC_REM_OP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: - wc->status = IB_WC_RETRY_EXC_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR: - wc->status = 
IB_WC_RNR_RETRY_EXC_ERR; - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - - /* CQE status error, directly return */ - if (wc->status != IB_WC_SUCCESS) - return 0; - - if (is_send) { - /* SQ conrespond to CQE */ - sq_wqe = hns_roce_get_send_wqe(*cur_qp, - roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S) & - ((*cur_qp)->sq.wqe_cnt-1)); - switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) { - case HNS_ROCE_WQE_OPCODE_SEND: - wc->opcode = IB_WC_SEND; - break; - case HNS_ROCE_WQE_OPCODE_RDMA_READ: - wc->opcode = IB_WC_RDMA_READ; - wc->byte_len = le32_to_cpu(cqe->byte_cnt); - break; - case HNS_ROCE_WQE_OPCODE_RDMA_WRITE: - wc->opcode = IB_WC_RDMA_WRITE; - break; - case HNS_ROCE_WQE_OPCODE_LOCAL_INV: - wc->opcode = IB_WC_LOCAL_INV; - break; - case HNS_ROCE_WQE_OPCODE_UD_SEND: - wc->opcode = IB_WC_SEND; - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ? - IB_WC_WITH_IMM : 0); - - wq = &(*cur_qp)->sq; - if ((*cur_qp)->sq_signal_bits) { - /* - * If sg_signal_bit is 1, - * firstly tail pointer updated to wqe - * which current cqe correspond to - */ - wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S); - wq->tail += (wqe_ctr - (u16)wq->tail) & - (wq->wqe_cnt - 1); - } - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; - } else { - /* RQ conrespond to CQE */ - wc->byte_len = le32_to_cpu(cqe->byte_cnt); - opcode = roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_OPERATION_TYPE_M, - CQE_BYTE_4_OPERATION_TYPE_S) & - HNS_ROCE_CQE_OPCODE_MASK; - switch (opcode) { - case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE: - wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; - wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = - cpu_to_be32(le32_to_cpu(cqe->immediate_data)); - break; - case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE: - if (roce_get_bit(cqe->cqe_byte_4, - CQE_BYTE_4_IMM_INDICATOR_S)) { - wc->opcode = IB_WC_RECV; - wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = cpu_to_be32( - le32_to_cpu(cqe->immediate_data)); - } else { - wc->opcode = IB_WC_RECV; - wc->wc_flags = 0; - } - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - - /* Update tail pointer, record wr_id */ - wq = &(*cur_qp)->rq; - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; - wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M, - CQE_BYTE_20_SL_S); - wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20, - CQE_BYTE_20_REMOTE_QPN_M, - CQE_BYTE_20_REMOTE_QPN_S); - wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20, - CQE_BYTE_20_GRH_PRESENT_S) ? 
- IB_WC_GRH : 0); - wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28, - CQE_BYTE_28_P_KEY_IDX_M, - CQE_BYTE_28_P_KEY_IDX_S); - } - - return 0; -} - -int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) -{ - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - struct hns_roce_qp *cur_qp = NULL; - unsigned long flags; - int npolled; - int ret; - - spin_lock_irqsave(&hr_cq->lock, flags); - - for (npolled = 0; npolled < num_entries; ++npolled) { - ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled); - if (ret) - break; - } - - if (npolled) { - *hr_cq->tptr_addr = hr_cq->cons_index & - ((hr_cq->cq_depth << 1) - 1); - - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); - } - - spin_unlock_irqrestore(&hr_cq->lock, flags); - - if (ret == 0 || ret == -EAGAIN) - return npolled; - else - return ret; -} - -static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - long end = HW_SYNC_TIMEOUT_MSECS; - __le32 bt_cmd_val[2] = {0}; - unsigned long flags = 0; - void __iomem *bt_cmd; - u64 bt_ba = 0; - - switch (table->type) { - case HEM_TYPE_QPC: - bt_ba = priv->bt_table.qpc_buf.map >> 12; - break; - case HEM_TYPE_MTPT: - bt_ba = priv->bt_table.mtpt_buf.map >> 12; - break; - case HEM_TYPE_CQC: - bt_ba = priv->bt_table.cqc_buf.map >> 12; - break; - case HEM_TYPE_SRQC: - dev_dbg(dev, "HEM_TYPE_SRQC not support.\n"); - return -EINVAL; - default: - return 0; - } - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - - spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!end) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, - flags); - return -EBUSY; - } - } else { - break; - } - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); - end -= HW_SYNC_SLEEP_TIME_INTERVAL; - } - - bt_cmd_val[0] = cpu_to_le32(bt_ba); - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); - hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); - - return 0; -} - -static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, - enum hns_roce_qp_state cur_state, - enum hns_roce_qp_state new_state, - struct hns_roce_qp_context *context, - struct hns_roce_qp *hr_qp) -{ - static const u16 - op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = { - [HNS_ROCE_QP_STATE_RST] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, - }, - [HNS_ROCE_QP_STATE_INIT] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - /* Note: In v1 engine, HW doesn't support RST2INIT. - * We use RST2INIT cmd instead of INIT2INIT. 
- */ - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, - [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP, - }, - [HNS_ROCE_QP_STATE_RTR] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP, - }, - [HNS_ROCE_QP_STATE_RTS] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP, - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP, - }, - [HNS_ROCE_QP_STATE_SQD] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP, - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP, - }, - [HNS_ROCE_QP_STATE_ERR] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - } - }; - - struct hns_roce_cmd_mailbox *mailbox; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - if (cur_state >= HNS_ROCE_QP_NUM_STATE || - new_state >= HNS_ROCE_QP_NUM_STATE || - !op[cur_state][new_state]) { - dev_err(dev, "[modify_qp] transition from state %d to %d is not supported\n", - cur_state, new_state); - return -EINVAL; - } - - if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP) - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, - HNS_ROCE_CMD_2RST_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - - if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP) - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, - HNS_ROCE_CMD_2ERR_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - memcpy(mailbox->buf, context, sizeof(*context)); - - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, - op[cur_state][new_state], - HNS_ROCE_CMD_TIMEOUT_MSECS); - - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - return ret; -} - -static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, - u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba) -{ - struct ib_device *ibdev = &hr_dev->ib_dev; - int count; - - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba); - if (count < 1) { - ibdev_err(ibdev, "Failed to find SQ ba\n"); - return -ENOBUFS; - } - - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba, - 1, NULL); - if (!count) { - ibdev_err(ibdev, "Failed to find RQ ba\n"); - return -ENOBUFS; - } - - return 0; -} - -static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_sqp_context *context; - dma_addr_t dma_handle = 0; - u32 __iomem *addr; - u64 sq_ba = 0; - u64 rq_ba = 0; - __le32 tmp; - u32 reg_val; - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - /* Search QP buf's MTTs */ - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) - goto out; - - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { - roce_set_field(context->qp1c_bytes_4, - QP1C_BYTES_4_SQ_WQE_SHIFT_M, - QP1C_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qp1c_bytes_4, - QP1C_BYTES_4_RQ_WQE_SHIFT_M, - QP1C_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M, - QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn); - -
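The op[cur_state][new_state] table in hns_roce_v1_qp_modify() above encodes the whole verb-to-mailbox-command mapping as a two-dimensional lookup, with a zero entry meaning the transition is unsupported. A minimal, self-contained sketch of that pattern; all state names and command values below are illustrative, not the driver's:

```c
#include <stdio.h>

enum qp_state { Q_RST, Q_INIT, Q_RTR, Q_RTS, Q_NUM_STATE };

/* zero (the implicit initializer) marks an unsupported transition */
static const unsigned short cmd[Q_NUM_STATE][Q_NUM_STATE] = {
	[Q_RST]  = { [Q_INIT] = 0x1 },           /* hypothetical RST2INIT */
	[Q_INIT] = { [Q_INIT] = 0x1,             /* reuse RST2INIT        */
		     [Q_RTR]  = 0x2 },           /* hypothetical INIT2RTR */
	[Q_RTR]  = { [Q_RTS]  = 0x3 },           /* hypothetical RTR2RTS  */
};

static int modify(enum qp_state cur, enum qp_state new)
{
	if (cur >= Q_NUM_STATE || new >= Q_NUM_STATE || !cmd[cur][new])
		return -1;                      /* transition not supported */
	printf("post mailbox cmd 0x%x\n", cmd[cur][new]);
	return 0;
}

int main(void)
{
	modify(Q_RST, Q_INIT);                  /* accepted */
	return modify(Q_RTR, Q_INIT) ? 0 : 1;   /* rejected */
}
```

Bounds-checking both indices before the lookup, as the driver does, is what lets the table stay sparse.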
context->sq_rq_bt_l = cpu_to_le32(dma_handle); - roce_set_field(context->qp1c_bytes_12, - QP1C_BYTES_12_SQ_RQ_BT_H_M, - QP1C_BYTES_12_SQ_RQ_BT_H_S, - upper_32_bits(dma_handle)); - - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, - QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M, - QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); - roce_set_bit(context->qp1c_bytes_16, - QP1C_BYTES_16_SIGNALING_TYPE_S, - hr_qp->sq_signal_bits); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, - 1); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, - 1); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S, - 0); - - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M, - QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head); - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M, - QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index); - - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); - - roce_set_field(context->qp1c_bytes_28, - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M, - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S, - upper_32_bits(rq_ba)); - roce_set_field(context->qp1c_bytes_28, - QP1C_BYTES_28_RQ_CUR_IDX_M, - QP1C_BYTES_28_RQ_CUR_IDX_S, 0); - - roce_set_field(context->qp1c_bytes_32, - QP1C_BYTES_32_RX_CQ_NUM_M, - QP1C_BYTES_32_RX_CQ_NUM_S, - to_hr_cq(ibqp->recv_cq)->cqn); - roce_set_field(context->qp1c_bytes_32, - QP1C_BYTES_32_TX_CQ_NUM_M, - QP1C_BYTES_32_TX_CQ_NUM_S, - to_hr_cq(ibqp->send_cq)->cqn); - - context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qp1c_bytes_40, - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M, - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - roce_set_field(context->qp1c_bytes_40, - QP1C_BYTES_40_SQ_CUR_IDX_M, - QP1C_BYTES_40_SQ_CUR_IDX_S, 0); - - /* Copy context to QP1C register */ - addr = (u32 __iomem *)(hr_dev->reg_base + - ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context)); - - writel(le32_to_cpu(context->qp1c_bytes_4), addr); - writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1); - writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2); - writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3); - writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4); - writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5); - writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6); - writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7); - writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8); - writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9); - } - - /* Modify QP1C status */ - reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context)); - tmp = cpu_to_le32(reg_val); - roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M, - ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state); - reg_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context), reg_val); - - hr_qp->state = new_state; - if (new_state == IB_QPS_RESET) { - hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, - ibqp->srq ? 
to_hr_srq(ibqp->srq) : NULL); - if (ibqp->send_cq != ibqp->recv_cq) - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), - hr_qp->qpn, NULL); - - hr_qp->rq.head = 0; - hr_qp->rq.tail = 0; - hr_qp->sq.head = 0; - hr_qp->sq.tail = 0; - } - - kfree(context); - return 0; - -out: - kfree(context); - return -EINVAL; -} - -static bool check_qp_state(enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - static const bool sm[][IB_QPS_ERR + 1] = { - [IB_QPS_RESET] = { [IB_QPS_RESET] = true, - [IB_QPS_INIT] = true }, - [IB_QPS_INIT] = { [IB_QPS_RESET] = true, - [IB_QPS_INIT] = true, - [IB_QPS_RTR] = true, - [IB_QPS_ERR] = true }, - [IB_QPS_RTR] = { [IB_QPS_RESET] = true, - [IB_QPS_RTS] = true, - [IB_QPS_ERR] = true }, - [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }, - [IB_QPS_SQD] = {}, - [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } - }; - - return sm[cur_state][new_state]; -} - -static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_context *context; - const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); - dma_addr_t dma_handle_2 = 0; - dma_addr_t dma_handle = 0; - __le32 doorbell[2] = {0}; - u64 *mtts_2 = NULL; - int ret = -EINVAL; - const u8 *smac; - u64 sq_ba = 0; - u64 rq_ba = 0; - u32 port; - u32 port_num; - u8 *dmac; - - if (!check_qp_state(cur_state, new_state)) { - ibdev_err(ibqp->device, - "QP(%u) transition from state %d to %d is not supported\n", - ibqp->qp_num, cur_state, new_state); - return -EINVAL; - } - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - /* Search QP buf's MTTs */ - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) - goto out; - - /* Search IRRL's mtts */ - mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, - hr_qp->qpn, &dma_handle_2); - if (mtts_2 == NULL) { - dev_err(dev, "qp irrl_table find failed\n"); - goto out; - } - - /* - * Reset to init - * Mandatory param: - * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS - * Optional param: NA - */ - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, - to_hr_qp_type(hr_qp->ibqp.qp_type)); - - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) - ); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) - ); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_PD_M, -
QP_CONTEXT_QPC_BYTES_4_PD_S, - to_hr_pd(ibqp->pd)->pdn); - hr_qp->access_flags = attr->qp_access_flags; - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, - to_hr_cq(ibqp->send_cq)->cqn); - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, - to_hr_cq(ibqp->recv_cq)->cqn); - - if (ibqp->srq) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, - to_hr_srq(ibqp->srq)->srqn); - - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - attr->pkey_index); - hr_qp->pkey_index = attr->pkey_index; - roce_set_field(context->qpc_bytes_16, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, - to_hr_qp_type(hr_qp->ibqp.qp_type)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); - if (attr_mask & IB_QP_ACCESS_FLAGS) { - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(attr->qp_access_flags & - IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(attr->qp_access_flags & - IB_ACCESS_REMOTE_WRITE)); - } else { - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(hr_qp->access_flags & - IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(hr_qp->access_flags & - IB_ACCESS_REMOTE_WRITE)); - } - - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_PD_M, - QP_CONTEXT_QPC_BYTES_4_PD_S, - to_hr_pd(ibqp->pd)->pdn); - - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, - to_hr_cq(ibqp->send_cq)->cqn); - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, - to_hr_cq(ibqp->recv_cq)->cqn); - - if (ibqp->srq) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, - to_hr_srq(ibqp->srq)->srqn); - if (attr_mask & IB_QP_PKEY_INDEX) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - attr->pkey_index); - else - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - hr_qp->pkey_index); - - roce_set_field(context->qpc_bytes_16, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { - if ((attr_mask & IB_QP_ALT_PATH) || - (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_PKEY_INDEX) || - (attr_mask & IB_QP_QKEY)) { - dev_err(dev, "INIT2RTR attr_mask 
error\n"); - goto out; - } - - dmac = (u8 *)attr->ah_attr.roce.dmac; - - context->sq_rq_bt_l = cpu_to_le32(dma_handle); - roce_set_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M, - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S, - upper_32_bits(dma_handle)); - roce_set_bit(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S, - 1); - roce_set_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S, - attr->min_rnr_timer); - context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2)); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M, - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S, - ((u32)(dma_handle_2 >> 32)) & - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M, - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0); - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S, - 1); - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, - hr_qp->sq_signal_bits); - - port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : - hr_qp->port; - smac = (const u8 *)hr_dev->dev_addr[port]; - /* when dmac equals smac or loop_idc is 1, it should loopback */ - if (ether_addr_equal_unaligned(dmac, smac) || - hr_dev->loop_idc == 0x1) - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1); - - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S, - rdma_ah_get_ah_flags(&attr->ah_attr)); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S, - ilog2((unsigned int)attr->max_dest_rd_atomic)); - - if (attr_mask & IB_QP_DEST_QPN) - roce_set_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S, - attr->dest_qp_num); - - /* Configure GID index */ - port_num = rdma_ah_get_port_num(&attr->ah_attr); - roce_set_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, - port_num - 1, - grh->sgid_index)); - - memcpy(&(context->dmac_l), dmac, 4); - - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S, - *((u16 *)(&dmac[4]))); - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M, - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S, - rdma_ah_get_static_rate(&attr->ah_attr)); - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S, - grh->hop_limit); - - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S, - grh->flow_label); - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, - QP_CONTEXT_QPC_BYTES_48_TCLASS_S, - grh->traffic_class); - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_MTU_M, - QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu); - - memcpy(context->dgid, grh->dgid.raw, - sizeof(grh->dgid.raw)); - - dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l, - roce_get_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)); - - roce_set_field(context->qpc_bytes_68, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(context->qpc_bytes_68, - QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, - 
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); - - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); - - roce_set_field(context->qpc_bytes_76, - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S, - upper_32_bits(rq_ba)); - roce_set_field(context->qpc_bytes_76, - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M, - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0); - - context->rx_rnr_time = 0; - - roce_set_field(context->qpc_bytes_84, - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M, - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S, - attr->rq_psn - 1); - roce_set_field(context->qpc_bytes_84, - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M, - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0); - - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S, - attr->rq_psn); - roce_set_bit(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0); - roce_set_bit(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0); - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S, - 0); - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S, - 0); - - context->dma_length = 0; - context->r_key = 0; - context->va_l = 0; - context->va_h = 0; - - roce_set_field(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0); - roce_set_bit(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0); - roce_set_bit(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0); - - roce_set_field(context->qpc_bytes_112, - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M, - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0); - roce_set_field(context->qpc_bytes_112, - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M, - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0); - - /* For chip resp ack */ - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->phy_port); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { - /* If an optional param is present, return an error */ - if ((attr_mask & IB_QP_ALT_PATH) || - (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_QKEY) || - (attr_mask & IB_QP_PATH_MIG_STATE) || - (attr_mask & IB_QP_CUR_STATE) || - (attr_mask & IB_QP_MIN_RNR_TIMER)) { - dev_err(dev, "RTR2RTS attr_mask error\n"); - goto out; - } - - context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qpc_bytes_120, - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - - roce_set_field(context->qpc_bytes_124, - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M, - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0); - roce_set_field(context->qpc_bytes_124, - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M, - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0); - - roce_set_field(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S, - attr->sq_psn); - roce_set_bit(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0); - roce_set_field(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M, -
QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S, - 0); - roce_set_bit(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0); - - roce_set_field(context->qpc_bytes_132, - QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M, - QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0); - roce_set_field(context->qpc_bytes_132, - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M, - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0); - - roce_set_field(context->qpc_bytes_136, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S, - attr->sq_psn); - roce_set_field(context->qpc_bytes_136, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S, - attr->sq_psn); - - roce_set_field(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S, - (attr->sq_psn >> SQ_PSN_SHIFT)); - roce_set_field(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0); - roce_set_bit(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); - - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, - attr->retry_cnt); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, - attr->rnr_retry); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_LSN_M, - QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); - - context->rnr_retry = 0; - - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, - attr->retry_cnt); - if (attr->timeout < 0x12) { - dev_info(dev, "ack timeout value (0x%x) must be at least 0x12.\n", - attr->timeout); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - 0x12); - } else { - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - attr->timeout); - } - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, - attr->rnr_retry); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->phy_port); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S, - ilog2((unsigned int)attr->max_rd_atomic)); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M, - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0); - context->pkt_use_len = 0; - - roce_set_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn); - roce_set_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M, - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0); - - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S, - attr->sq_psn); - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0); - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M, - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0); - context->sge_use_len = 0; - - roce_set_field(context->qpc_bytes_176, - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0); - roce_set_field(context->qpc_bytes_176, - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S, - 0); - roce_set_field(context->qpc_bytes_180, - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0); - roce_set_field(context->qpc_bytes_180, - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0); - - context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - roce_set_bit(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0); - roce_set_field(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S, - 0); - } - - /* Every state migration must update the QP state field */ - roce_set_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state); - - /* SW pass context to HW */ - ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state), - to_hns_roce_state(new_state), context, - hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_modify failed\n"); - goto out; - } - - /* - * Because the driver uses rst2init instead of init2init, - * HW needs to flush the RQ head by doorbell again - */ - if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1); - - if (ibqp->uobject) { - hr_qp->rq.db_reg = hr_dev->reg_base + - hr_dev->odb_offset + - DB_REG_OFFSET * hr_dev->priv_uar.index; - } - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg); - } - - hr_qp->state = new_state; - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - hr_qp->resp_depth = attr->max_dest_rd_atomic; - if (attr_mask & IB_QP_PORT) { - hr_qp->port = attr->port_num - 1; - hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - } - - if (new_state == IB_QPS_RESET && !ibqp->uobject) { - hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, - ibqp->srq ?
to_hr_srq(ibqp->srq) : NULL); - if (ibqp->send_cq != ibqp->recv_cq) - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), - hr_qp->qpn, NULL); - - hr_qp->rq.head = 0; - hr_qp->rq.tail = 0; - hr_qp->sq.head = 0; - hr_qp->sq.tail = 0; - } -out: - kfree(context); - return ret; -} - -static int hns_roce_v1_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, int attr_mask, - enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) - return -EOPNOTSUPP; - - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) - return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state, - new_state); - else - return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state, - new_state); -} - -static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state) -{ - switch (state) { - case HNS_ROCE_QP_STATE_RST: - return IB_QPS_RESET; - case HNS_ROCE_QP_STATE_INIT: - return IB_QPS_INIT; - case HNS_ROCE_QP_STATE_RTR: - return IB_QPS_RTR; - case HNS_ROCE_QP_STATE_RTS: - return IB_QPS_RTS; - case HNS_ROCE_QP_STATE_SQD: - return IB_QPS_SQD; - case HNS_ROCE_QP_STATE_ERR: - return IB_QPS_ERR; - default: - return IB_QPS_ERR; - } -} - -static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct hns_roce_qp_context *hr_context) -{ - struct hns_roce_cmd_mailbox *mailbox; - int ret; - - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, - HNS_ROCE_CMD_QUERY_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - if (!ret) - memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); - else - dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n"); - - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - - return ret; -} - -static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_sqp_context context; - u32 addr; - - mutex_lock(&hr_qp->mutex); - - if (hr_qp->state == IB_QPS_RESET) { - qp_attr->qp_state = IB_QPS_RESET; - goto done; - } - - addr = ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(struct hns_roce_sqp_context); - context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr)); - context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1)); - context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2)); - context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3)); - context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4)); - context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5)); - context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6)); - context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7)); - context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8)); - context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9)); - - hr_qp->state = roce_get_field(context.qp1c_bytes_4, - QP1C_BYTES_4_QP_STATE_M, - QP1C_BYTES_4_QP_STATE_S); - qp_attr->qp_state = hr_qp->state; - qp_attr->path_mtu = IB_MTU_256; - qp_attr->path_mig_state = IB_MIG_ARMED; - qp_attr->qkey = QKEY_VAL; - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - qp_attr->rq_psn = 0; - qp_attr->sq_psn = 0; - qp_attr->dest_qp_num = 1; - qp_attr->qp_access_flags = 6; - - qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20, - QP1C_BYTES_20_PKEY_IDX_M, - QP1C_BYTES_20_PKEY_IDX_S); - qp_attr->port_num = hr_qp->port + 1; - 
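hns_roce_v1_query_qpc() above follows the usual mailbox round-trip: allocate a command buffer the hardware can DMA into, post the query command with the buffer's address, copy the context out only when the command succeeds, and free the buffer either way. A user-space model of that flow, assuming a fake post_query_cmd() in place of the real command engine (every name here is a stand-in, not a driver API):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct qp_context { unsigned int state; };

/* stand-in for the firmware filling the mailbox buffer */
static int post_query_cmd(void *mbox_buf, unsigned long qpn)
{
	(void)qpn;
	((struct qp_context *)mbox_buf)->state = 3;	/* pretend: RTS */
	return 0;
}

static int query_qpc(unsigned long qpn, struct qp_context *out)
{
	void *mbox = malloc(sizeof(*out));	/* alloc_cmd_mailbox */
	int ret;

	if (!mbox)
		return -1;
	ret = post_query_cmd(mbox, qpn);
	if (!ret)
		memcpy(out, mbox, sizeof(*out));	/* copy only on success */
	free(mbox);				/* free_cmd_mailbox */
	return ret;
}

int main(void)
{
	struct qp_context ctx;

	if (!query_qpc(8, &ctx))
		printf("qp state %u\n", ctx.state);
	return 0;
}
```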
qp_attr->sq_draining = 0; - qp_attr->max_rd_atomic = 0; - qp_attr->max_dest_rd_atomic = 0; - qp_attr->min_rnr_timer = 0; - qp_attr->timeout = 0; - qp_attr->retry_cnt = 0; - qp_attr->rnr_retry = 0; - qp_attr->alt_timeout = 0; - -done: - qp_attr->cur_qp_state = qp_attr->qp_state; - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; - qp_attr->cap.max_inline_data = 0; - qp_init_attr->cap = qp_attr->cap; - qp_init_attr->create_flags = 0; - - mutex_unlock(&hr_qp->mutex); - - return 0; -} - -static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_context *context; - int tmp_qp_state; - int ret = 0; - int state; - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - memset(qp_attr, 0, sizeof(*qp_attr)); - memset(qp_init_attr, 0, sizeof(*qp_init_attr)); - - mutex_lock(&hr_qp->mutex); - - if (hr_qp->state == IB_QPS_RESET) { - qp_attr->qp_state = IB_QPS_RESET; - goto done; - } - - ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context); - if (ret) { - dev_err(dev, "query qpc error\n"); - ret = -EINVAL; - goto out; - } - - state = roce_get_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S); - tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state); - if (tmp_qp_state == -1) { - dev_err(dev, "to_ib_qp_state error\n"); - ret = -EINVAL; - goto out; - } - hr_qp->state = (u8)tmp_qp_state; - qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; - qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_MTU_M, - QP_CONTEXT_QPC_BYTES_48_MTU_S); - qp_attr->path_mig_state = IB_MIG_ARMED; - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - if (hr_qp->ibqp.qp_type == IB_QPT_UD) - qp_attr->qkey = QKEY_VAL; - - qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S); - qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S); - qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S); - qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) | - ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) | - ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3); - - if (hr_qp->ibqp.qp_type == IB_QPT_RC) { - struct ib_global_route *grh = - rdma_ah_retrieve_grh(&qp_attr->ah_attr); - - rdma_ah_set_sl(&qp_attr->ah_attr, - roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S)); - rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); - grh->flow_label = - roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S); - grh->sgid_index = - roce_get_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S); - grh->hop_limit = - roce_get_field(context->qpc_bytes_44, - 
QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S); - grh->traffic_class = - roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, - QP_CONTEXT_QPC_BYTES_48_TCLASS_S); - - memcpy(grh->dgid.raw, context->dgid, - sizeof(grh->dgid.raw)); - } - - qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S); - qp_attr->port_num = hr_qp->port + 1; - qp_attr->sq_draining = 0; - qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S); - qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S); - qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)); - qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)); - qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); - qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry); - -done: - qp_attr->cur_qp_state = qp_attr->qp_state; - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; - - if (!ibqp->uobject) { - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; - } else { - qp_attr->cap.max_send_wr = 0; - qp_attr->cap.max_send_sge = 0; - } - - qp_init_attr->cap = qp_attr->cap; - -out: - mutex_unlock(&hr_qp->mutex); - kfree(context); - return ret; -} - -static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - - return hr_qp->doorbell_qpn <= 1 ? - hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) : - hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); -} - -int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_cq *send_cq, *recv_cq; - int ret; - - ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); - if (ret) - return ret; - - send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; - recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; - - hns_roce_lock_cqs(send_cq, recv_cq); - if (!udata) { - if (recv_cq) - __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, - (hr_qp->ibqp.srq ? - to_hr_srq(hr_qp->ibqp.srq) : - NULL)); - - if (send_cq && send_cq != recv_cq) - __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); - } - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_unlock_cqs(send_cq, recv_cq); - - hns_roce_qp_destroy(hr_dev, hr_qp, udata); - - return 0; -} - -static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - struct device *dev = &hr_dev->pdev->dev; - u32 cqe_cnt_ori; - u32 cqe_cnt_cur; - int wait_time = 0; - - /* - * Before freeing cq buffer, we need to ensure that the outstanding CQE - * have been written by checking the CQE counter. 
- */ - cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); - while (1) { - if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) & - HNS_ROCE_CQE_WCMD_EMPTY_BIT) - break; - - cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); - if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT) - break; - - msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS); - if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) { - dev_warn(dev, "Destroy cq 0x%lx timeout!\n", - hr_cq->cqn); - break; - } - wait_time++; - } - return 0; -} - -static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not) -{ - roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) | - (req_not << eq->log_entries), eq->db_reg); -} - -static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, int qpn) -{ - struct device *dev = &hr_dev->pdev->dev; - - dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_LWQCE_QPC_ERROR: - dev_warn(dev, "QP %d, QPC error.\n", qpn); - break; - case HNS_ROCE_LWQCE_MTU_ERROR: - dev_warn(dev, "QP %d, MTU error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: - dev_warn(dev, "QP %d, WQE shift error\n", qpn); - break; - case HNS_ROCE_LWQCE_SL_ERROR: - dev_warn(dev, "QP %d, SL error.\n", qpn); - break; - case HNS_ROCE_LWQCE_PORT_ERROR: - dev_warn(dev, "QP %d, port error.\n", qpn); - break; - default: - break; - } -} - -static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int qpn) -{ - struct device *dev = &hr_dev->pdev->dev; - - dev_warn(dev, "Local Access Violation Work Queue Error.\n"); - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: - dev_warn(dev, "QP %d, R_key violation.\n", qpn); - break; - case HNS_ROCE_LAVWQE_LENGTH_ERROR: - dev_warn(dev, "QP %d, length error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_VA_ERROR: - dev_warn(dev, "QP %d, VA error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_PD_ERROR: - dev_err(dev, "QP %d, PD error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_RW_ACC_ERROR: - dev_warn(dev, "QP %d, rw acc error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: - dev_warn(dev, "QP %d, key state error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: - dev_warn(dev, "QP %d, MR operation error.\n", qpn); - break; - default: - break; - } -} - -static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type) -{ - struct device *dev = &hr_dev->pdev->dev; - int phy_port; - int qpn; - - qpn = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); - phy_port = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); - if (qpn <= 1) - qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_warn(dev, "Invalid Req Local Work Queue Error.\n" - "QP %d, phy_port %d.\n", qpn, phy_port); - break; - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - hns_roce_v1_wq_catas_err_handle(hr_dev, 
aeqe, qpn); - break; - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn); - break; - default: - break; - } - - hns_roce_qp_event(hr_dev, qpn, event_type); -} - -static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type) -{ - struct device *dev = &hr_dev->pdev->dev; - u32 cqn; - - cqn = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S); - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_warn(dev, "CQ 0x%x access err.\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%x overflow\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn); - break; - default: - break; - } - - hns_roce_cq_event(hr_dev, cqn, event_type); -} - -static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe) -{ - struct device *dev = &hr_dev->pdev->dev; - - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_DB_SUBTYPE_SDB_OVF: - dev_warn(dev, "SDB overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF: - dev_warn(dev, "SDB almost overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP: - dev_warn(dev, "SDB almost empty.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_OVF: - dev_warn(dev, "ODB overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF: - dev_warn(dev, "ODB almost overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP: - dev_warn(dev, "ODB almost empty.\n"); - break; - default: - break; - } -} - -static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry) -{ - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE; - - return (struct hns_roce_aeqe *)((u8 *) - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + - off % HNS_ROCE_BA_SIZE); -} - -static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq) -{ - struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index); - - return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^ - !!(eq->cons_index & eq->entries)) ?
aeqe : NULL; -} - -static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_aeqe *aeqe; - int aeqes_found = 0; - int event_type; - - while ((aeqe = next_aeqe_sw_v1(eq))) { - /* Make sure we read the AEQ entry after we have checked the - * ownership bit - */ - dma_rmb(); - - dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n", - aeqe, - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - event_type = roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S); - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_PATH_MIG: - dev_warn(dev, "PATH MIG not supported\n"); - break; - case HNS_ROCE_EVENT_TYPE_COMM_EST: - dev_warn(dev, "COMMUNICATION established\n"); - break; - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - dev_warn(dev, "SQ DRAINED not supported\n"); - break; - case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - dev_warn(dev, "PATH MIG failed\n"); - break; - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type); - break; - case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: - dev_warn(dev, "SRQ not supported!\n"); - break; - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type); - break; - case HNS_ROCE_EVENT_TYPE_PORT_CHANGE: - dev_warn(dev, "port change.\n"); - break; - case HNS_ROCE_EVENT_TYPE_MB: - hns_roce_cmd_event(hr_dev, - le16_to_cpu(aeqe->event.cmd.token), - aeqe->event.cmd.status, - le64_to_cpu(aeqe->event.cmd.out_param - )); - break; - case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - hns_roce_v1_db_overflow_handle(hr_dev, aeqe); - break; - default: - dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n", - event_type, eq->eqn, eq->cons_index); - break; - } - - eq->cons_index++; - aeqes_found = 1; - - if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) - eq->cons_index = 0; - } - - set_eq_cons_index_v1(eq, 0); - - return aeqes_found; -} - -static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry) -{ - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE; - - return (struct hns_roce_ceqe *)((u8 *) - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + - off % HNS_ROCE_BA_SIZE); -} - -static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq) -{ - struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index); - - return (!!(roce_get_bit(ceqe->comp, - HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^ - (!!(eq->cons_index & eq->entries)) ?
ceqe : NULL; -} - -static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - struct hns_roce_ceqe *ceqe; - int ceqes_found = 0; - u32 cqn; - - while ((ceqe = next_ceqe_sw_v1(eq))) { - /* Make sure we read CEQ entry after we have checked the - * ownership bit - */ - dma_rmb(); - - cqn = roce_get_field(ceqe->comp, - HNS_ROCE_CEQE_CEQE_COMP_CQN_M, - HNS_ROCE_CEQE_CEQE_COMP_CQN_S); - hns_roce_cq_completion(hr_dev, cqn); - - ++eq->cons_index; - ceqes_found = 1; - - if (eq->cons_index > - EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) - eq->cons_index = 0; - } - - set_eq_cons_index_v1(eq, 0); - - return ceqes_found; -} - -static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr) -{ - struct hns_roce_eq *eq = eq_ptr; - struct hns_roce_dev *hr_dev = eq->hr_dev; - int int_work; - - if (eq->type_flag == HNS_ROCE_CEQ) - /* CEQ irq routine, CEQ is pulse irq, not clear */ - int_work = hns_roce_v1_ceq_int(hr_dev, eq); - else - /* AEQ irq routine, AEQ is pulse irq, not clear */ - int_work = hns_roce_v1_aeq_int(hr_dev, eq); - - return IRQ_RETVAL(int_work); -} - -static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id) -{ - struct hns_roce_dev *hr_dev = dev_id; - struct device *dev = &hr_dev->pdev->dev; - int int_work = 0; - u32 caepaemask_val; - u32 cealmovf_val; - u32 caepaest_val; - u32 aeshift_val; - u32 ceshift_val; - u32 cemask_val; - __le32 tmp; - int i; - - /* - * Abnormal interrupts (AEQ overflow, ECC multi-bit error and - * CEQ overflow) must be cleared: mask the irq, clear the int - * state, then cancel the mask operation - */ - aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG); - tmp = cpu_to_le32(aeshift_val); - - /* AEQE overflow */ - if (roce_get_bit(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) { - dev_warn(dev, "AEQ overflow!\n"); - - /* Set mask */ - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(caepaemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_ENABLE); - caepaemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); - - /* Clear int state(INT_WC : write 1 clear) */ - caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG); - tmp = cpu_to_le32(caepaest_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1); - caepaest_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val); - - /* Clear mask */ - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(caepaemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_DISABLE); - caepaemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); - } - - /* CEQ almost overflow */ - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { - ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(ceshift_val); - - if (roce_get_bit(tmp, - ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) { - dev_warn(dev, "CEQ[%d] almost overflow!\n", i); - int_work++; - - /* Set mask */ - cemask_val = roce_read(hr_dev, - ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cemask_val); - roce_set_bit(tmp, - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_ENABLE); - cemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, cemask_val); - - /* Clear int state(INT_WC : write 1 clear) */ - cealmovf_val =
roce_read(hr_dev, - ROCEE_CAEP_CEQ_ALM_OVF_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cealmovf_val); - roce_set_bit(tmp, - ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S, - 1); - cealmovf_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG + - i * CEQ_REG_OFFSET, cealmovf_val); - - /* Clear mask */ - cemask_val = roce_read(hr_dev, - ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cemask_val); - roce_set_bit(tmp, - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_DISABLE); - cemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, cemask_val); - } - } - - /* ECC multi-bit error alarm */ - dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n", - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG), - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG), - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG)); - - dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n", - roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG), - roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG), - roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG)); - - return IRQ_RETVAL(int_work); -} - -static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev) -{ - u32 aemask_val; - int masken = 0; - __le32 tmp; - int i; - - /* AEQ INT */ - aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(aemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - masken); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken); - aemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val); - - /* CEQ INT */ - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { - /* IRQ mask */ - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, masken); - } -} - -static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) + - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; - int i; - - if (!eq->buf_list) - return; - - for (i = 0; i < npages; ++i) - dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE, - eq->buf_list[i].buf, eq->buf_list[i].map); - - kfree(eq->buf_list); -} - -static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num, - int enable_flag) -{ - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num]; - __le32 tmp; - u32 val; - - val = readl(eqc); - tmp = cpu_to_le32(val); - - if (enable_flag) - roce_set_field(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_VALID); - else - roce_set_field(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_INVALID); - - val = le32_to_cpu(tmp); - writel(val, eqc); -} - -static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn]; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t tmp_dma_addr; - u32 eqcuridx_val; - u32 eqconsindx_val; - u32 eqshift_val; - __le32 tmp2 = 0; - __le32 tmp1 = 0; - __le32 tmp = 0; - int num_bas; - int ret; - int i; - - num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) + - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; - - if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) { - dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n", - (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE, - num_bas); - return -EINVAL; - } - - eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL); - 
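The next_aeqe_sw_v1()/next_ceqe_sw_v1() helpers above implement a classic ownership-bit ring: the producer flips the owner bit it writes on every full pass over the power-of-two-sized queue, and an entry is new as long as its owner bit still differs from the consumer's pass parity, which is simply cons_index & entries. A runnable user-space model of that scheme; EQ_ENTRIES and all names are illustrative:

```c
#include <stdio.h>

#define EQ_ENTRIES 4	/* must be a power of two */

struct eqe { unsigned int owner : 1; unsigned int data; };

static struct eqe ring[EQ_ENTRIES];
static unsigned int cons_index;

static struct eqe *next_eqe_sw(void)
{
	struct eqe *e = &ring[cons_index & (EQ_ENTRIES - 1)];
	/* parity flips once per full pass over the ring */
	unsigned int parity = !!(cons_index & EQ_ENTRIES);

	return (e->owner ^ parity) ? e : NULL;
}

int main(void)
{
	struct eqe *e;
	unsigned int i;

	/* producer: on the first pass it writes owner = 1 */
	for (i = 0; i < 3; i++)
		ring[i] = (struct eqe){ .owner = 1, .data = i };

	while ((e = next_eqe_sw())) {
		printf("consume eqe %u\n", e->data);
		/* wrap at twice the depth, as the v1 driver does */
		cons_index = (cons_index + 1) % (2 * EQ_ENTRIES);
	}
	return 0;
}
```

The dma_rmb() in the real interrupt handlers then ensures the entry body is read only after this ownership check has passed.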
if (!eq->buf_list) - return -ENOMEM; - - for (i = 0; i < num_bas; ++i) { - eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE, - &tmp_dma_addr, - GFP_KERNEL); - if (!eq->buf_list[i].buf) { - ret = -ENOMEM; - goto err_out_free_pages; - } - - eq->buf_list[i].map = tmp_dma_addr; - } - eq->cons_index = 0; - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_INVALID); - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S, - eq->log_entries); - eqshift_val = le32_to_cpu(tmp); - writel(eqshift_val, eqc); - - /* Configure eq extended address bits 12~44 */ - writel((u32)(eq->buf_list[0].map >> 12), eqc + 4); - - /* - * Configure eq extended address bits 45~49. - * 44 = 32 + 12: the address is shifted right by 12 because 4K - * pages are used, and by a further 32 to obtain the high 32-bit - * value written to hardware. - */ - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M, - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S, - eq->buf_list[0].map >> 44); - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M, - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0); - eqcuridx_val = le32_to_cpu(tmp1); - writel(eqcuridx_val, eqc + 8); - - /* Configure eq consumer index */ - roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M, - ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0); - eqconsindx_val = le32_to_cpu(tmp2); - writel(eqconsindx_val, eqc + 0xc); - - return 0; - -err_out_free_pages: - for (i -= 1; i >= 0; i--) - dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf, - eq->buf_list[i].map); - - kfree(eq->buf_list); - return ret; -} - -static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_eq *eq; - int irq_num; - int eq_num; - int ret; - int i, j; - - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; - irq_num = eq_num + hr_dev->caps.num_other_vectors; - - eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); - if (!eq_table->eq) - return -ENOMEM; - - eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base), - GFP_KERNEL); - if (!eq_table->eqc_base) { - ret = -ENOMEM; - goto err_eqc_base_alloc_fail; - } - - for (i = 0; i < eq_num; i++) { - eq = &eq_table->eq[i]; - eq->hr_dev = hr_dev; - eq->eqn = i; - eq->irq = hr_dev->irq[i]; - eq->log_page_size = PAGE_SHIFT; - - if (i < hr_dev->caps.num_comp_vectors) { - /* CEQ */ - eq_table->eqc_base[i] = hr_dev->reg_base + - ROCEE_CAEP_CEQC_SHIFT_0_REG + - CEQ_REG_OFFSET * i; - eq->type_flag = HNS_ROCE_CEQ; - eq->db_reg = hr_dev->reg_base + - ROCEE_CAEP_CEQC_CONS_IDX_0_REG + - CEQ_REG_OFFSET * i; - eq->entries = hr_dev->caps.ceqe_depth; - eq->log_entries = ilog2(eq->entries); - eq->eqe_size = HNS_ROCE_CEQE_SIZE; - } else { - /* AEQ */ - eq_table->eqc_base[i] = hr_dev->reg_base + - ROCEE_CAEP_AEQC_AEQE_SHIFT_REG; - eq->type_flag = HNS_ROCE_AEQ; - eq->db_reg = hr_dev->reg_base + - ROCEE_CAEP_AEQE_CONS_IDX_REG; - eq->entries = hr_dev->caps.aeqe_depth; - eq->log_entries = ilog2(eq->entries); - eq->eqe_size = HNS_ROCE_AEQE_SIZE; - } - } - - /* Disable irq */ - hns_roce_v1_int_mask_enable(hr_dev); - - /* Configure ce int interval */ - roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG, - HNS_ROCE_CEQ_DEFAULT_INTERVAL); - - /* Configure ce int burst num */ - roce_write(hr_dev,
ROCEE_CAEP_CE_BURST_NUM_CFG_REG, - HNS_ROCE_CEQ_DEFAULT_BURST_NUM); - - for (i = 0; i < eq_num; i++) { - ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]); - if (ret) { - dev_err(dev, "eq create failed\n"); - goto err_create_eq_fail; - } - } - - for (j = 0; j < irq_num; j++) { - if (j < eq_num) - ret = request_irq(hr_dev->irq[j], - hns_roce_v1_msix_interrupt_eq, 0, - hr_dev->irq_names[j], - &eq_table->eq[j]); - else - ret = request_irq(hr_dev->irq[j], - hns_roce_v1_msix_interrupt_abn, 0, - hr_dev->irq_names[j], hr_dev); - - if (ret) { - dev_err(dev, "request irq error!\n"); - goto err_request_irq_fail; - } - } - - for (i = 0; i < eq_num; i++) - hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE); - - return 0; - -err_request_irq_fail: - for (j -= 1; j >= 0; j--) - free_irq(hr_dev->irq[j], &eq_table->eq[j]); - -err_create_eq_fail: - for (i -= 1; i >= 0; i--) - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); - - kfree(eq_table->eqc_base); - -err_eqc_base_alloc_fail: - kfree(eq_table->eq); - - return ret; -} - -static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; - int irq_num; - int eq_num; - int i; - - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; - irq_num = eq_num + hr_dev->caps.num_other_vectors; - for (i = 0; i < eq_num; i++) { - /* Disable EQ */ - hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE); - - free_irq(hr_dev->irq[i], &eq_table->eq[i]); - - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); - } - for (i = eq_num; i < irq_num; i++) - free_irq(hr_dev->irq[i], hr_dev); - - kfree(eq_table->eqc_base); - kfree(eq_table->eq); -} - -static const struct ib_device_ops hns_roce_v1_dev_ops = { - .destroy_qp = hns_roce_v1_destroy_qp, - .poll_cq = hns_roce_v1_poll_cq, - .post_recv = hns_roce_v1_post_recv, - .post_send = hns_roce_v1_post_send, - .query_qp = hns_roce_v1_query_qp, - .req_notify_cq = hns_roce_v1_req_notify_cq, -}; - -static const struct hns_roce_hw hns_roce_hw_v1 = { - .reset = hns_roce_v1_reset, - .hw_profile = hns_roce_v1_profile, - .hw_init = hns_roce_v1_init, - .hw_exit = hns_roce_v1_exit, - .post_mbox = hns_roce_v1_post_mbox, - .poll_mbox_done = hns_roce_v1_chk_mbox, - .set_gid = hns_roce_v1_set_gid, - .set_mac = hns_roce_v1_set_mac, - .set_mtu = hns_roce_v1_set_mtu, - .write_mtpt = hns_roce_v1_write_mtpt, - .write_cqc = hns_roce_v1_write_cqc, - .set_hem = hns_roce_v1_set_hem, - .clear_hem = hns_roce_v1_clear_hem, - .modify_qp = hns_roce_v1_modify_qp, - .dereg_mr = hns_roce_v1_dereg_mr, - .destroy_cq = hns_roce_v1_destroy_cq, - .init_eq = hns_roce_v1_init_eq_table, - .cleanup_eq = hns_roce_v1_cleanup_eq_table, - .hns_roce_dev_ops = &hns_roce_v1_dev_ops, -}; - -static const struct of_device_id hns_roce_of_match[] = { - { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, }, - {}, -}; -MODULE_DEVICE_TABLE(of, hns_roce_of_match); - -static const struct acpi_device_id hns_roce_acpi_match[] = { - { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 }, - {}, -}; -MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match); - -static struct -platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode) -{ - struct device *dev; - - /* get the 'device' corresponding to the matching 'fwnode' */ - dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); - /* get the platform device */ - return dev ? 
to_platform_device(dev) : NULL; -} - -static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) -{ - struct device *dev = &hr_dev->pdev->dev; - struct platform_device *pdev = NULL; - struct net_device *netdev = NULL; - struct device_node *net_node; - int port_cnt = 0; - u8 phy_port; - int ret; - int i; - - /* check if we are compatible with the underlying SoC */ - if (dev_of_node(dev)) { - const struct of_device_id *of_id; - - of_id = of_match_node(hns_roce_of_match, dev->of_node); - if (!of_id) { - dev_err(dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = (const struct hns_roce_hw *)of_id->data; - if (!hr_dev->hw) { - dev_err(dev, "couldn't get H/W specific DT data!\n"); - return -ENXIO; - } - } else if (is_acpi_device_node(dev->fwnode)) { - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(hns_roce_acpi_match, dev); - if (!acpi_id) { - dev_err(dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data; - if (!hr_dev->hw) { - dev_err(dev, "couldn't get H/W specific ACPI data!\n"); - return -ENXIO; - } - } else { - dev_err(dev, "can't read compatibility data from DT or ACPI\n"); - return -ENXIO; - } - - /* get the mapped register base address */ - hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0); - if (IS_ERR(hr_dev->reg_base)) - return PTR_ERR(hr_dev->reg_base); - - /* read the node_guid of IB device from the DT or ACPI */ - ret = device_property_read_u8_array(dev, "node-guid", - (u8 *)&hr_dev->ib_dev.node_guid, - GUID_LEN); - if (ret) { - dev_err(dev, "couldn't get node_guid from DT or ACPI!\n"); - return ret; - } - - /* get the RoCE associated ethernet ports or netdevices */ - for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) { - if (dev_of_node(dev)) { - net_node = of_parse_phandle(dev->of_node, "eth-handle", - i); - if (!net_node) - continue; - pdev = of_find_device_by_node(net_node); - } else if (is_acpi_device_node(dev->fwnode)) { - struct fwnode_reference_args args; - - ret = acpi_node_get_property_reference(dev->fwnode, - "eth-handle", - i, &args); - if (ret) - continue; - pdev = hns_roce_find_pdev(args.fwnode); - } else { - dev_err(dev, "cannot read data from DT or ACPI\n"); - return -ENXIO; - } - - if (pdev) { - netdev = platform_get_drvdata(pdev); - phy_port = (u8)i; - if (netdev) { - hr_dev->iboe.netdevs[port_cnt] = netdev; - hr_dev->iboe.phy_port[port_cnt] = phy_port; - } else { - dev_err(dev, "no netdev found with pdev %s\n", - pdev->name); - return -ENODEV; - } - port_cnt++; - } - } - - if (port_cnt == 0) { - dev_err(dev, "unable to get eth-handle for available ports!\n"); - return -EINVAL; - } - - hr_dev->caps.num_ports = port_cnt; - - /* cmd issue mode: 0 is poll, 1 is event */ - hr_dev->cmd_mod = 1; - hr_dev->loop_idc = 0; - hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; - hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG; - - /* read the interrupt names from the DT or ACPI */ - ret = device_property_read_string_array(dev, "interrupt-names", - hr_dev->irq_names, - HNS_ROCE_V1_MAX_IRQ_NUM); - if (ret < 0) { - dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n"); - return ret; - } - - /* fetch the interrupt numbers */ - for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { - hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); - if (hr_dev->irq[i] <= 0) - return -EINVAL; - } - - return 0; -} - -/** - * hns_roce_probe - RoCE driver entrance - * @pdev: pointer to platform device - * Return : int - * - */ -static int hns_roce_probe(struct platform_device *pdev) -{ - 
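(Illustrative aside, not part of the patch: hns_roce_get_cfg() above reads "node-guid" and "interrupt-names" through the unified fwnode property API, which is why one code path serves both DT and ACPI descriptions. A hypothetical kernel-context sketch of that pattern; example_read_node_guid is an invented name.)

#include <linux/device.h>
#include <linux/property.h>

#define GUID_LEN 16

/* hypothetical helper: the same call resolves the property whether the
 * device is described by a DT node or by ACPI _DSD data */
static int example_read_node_guid(struct device *dev, u8 *guid)
{
	return device_property_read_u8_array(dev, "node-guid",
					     guid, GUID_LEN);
}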
int ret; - struct hns_roce_dev *hr_dev; - struct device *dev = &pdev->dev; - - hr_dev = ib_alloc_device(hns_roce_dev, ib_dev); - if (!hr_dev) - return -ENOMEM; - - hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL); - if (!hr_dev->priv) { - ret = -ENOMEM; - goto error_failed_kzalloc; - } - - hr_dev->pdev = pdev; - hr_dev->dev = dev; - platform_set_drvdata(pdev, hr_dev); - - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) { - dev_err(dev, "Not usable DMA addressing mode\n"); - ret = -EIO; - goto error_failed_get_cfg; - } - - ret = hns_roce_get_cfg(hr_dev); - if (ret) { - dev_err(dev, "Get Configuration failed!\n"); - goto error_failed_get_cfg; - } - - ret = hns_roce_init(hr_dev); - if (ret) { - dev_err(dev, "RoCE engine init failed!\n"); - goto error_failed_get_cfg; - } - - return 0; - -error_failed_get_cfg: - kfree(hr_dev->priv); - -error_failed_kzalloc: - ib_dealloc_device(&hr_dev->ib_dev); - - return ret; -} - -/** - * hns_roce_remove - remove RoCE device - * @pdev: pointer to platform device - */ -static int hns_roce_remove(struct platform_device *pdev) -{ - struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev); - - hns_roce_exit(hr_dev); - kfree(hr_dev->priv); - ib_dealloc_device(&hr_dev->ib_dev); - - return 0; -} - -static struct platform_driver hns_roce_driver = { - .probe = hns_roce_probe, - .remove = hns_roce_remove, - .driver = { - .name = DRV_NAME, - .of_match_table = hns_roce_of_match, - .acpi_match_table = ACPI_PTR(hns_roce_acpi_match), - }, -}; - -module_platform_driver(hns_roce_driver); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); -MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>"); -MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); -MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver"); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h deleted file mode 100644 index 60fdcbae6729..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ /dev/null @@ -1,1147 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef _HNS_ROCE_HW_V1_H -#define _HNS_ROCE_HW_V1_H - -#define CQ_STATE_VALID 2 - -#define HNS_ROCE_V1_MAX_PD_NUM 0x8000 -#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000 -#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000 - -#define HNS_ROCE_V1_MAX_QP_NUM 0x40000 -#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000 - -#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000 - -#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000 - -#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128 -#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128 - -#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64 -#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64 -#define HNS_ROCE_V1_SG_NUM 2 -#define HNS_ROCE_V1_INLINE_SIZE 32 - -#define HNS_ROCE_V1_UAR_NUM 256 -#define HNS_ROCE_V1_PHY_UAR_NUM 8 - -#define HNS_ROCE_V1_GID_NUM 16 -#define HNS_ROCE_V1_RESV_QP 8 - -#define HNS_ROCE_V1_MAX_IRQ_NUM 34 -#define HNS_ROCE_V1_COMP_VEC_NUM 32 -#define HNS_ROCE_V1_AEQE_VEC_NUM 1 -#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1 - -#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000 -#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400 - -#define HNS_ROCE_V1_QPC_SIZE 256 -#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8 -#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64 -#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64 -#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64 - -#define HNS_ROCE_V1_CQE_SIZE 32 -#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000 - -#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17) - -#define HNS_ROCE_V1_EXT_RAQ_WF 8 -#define HNS_ROCE_V1_RAQ_ENTRY 64 -#define HNS_ROCE_V1_RAQ_DEPTH 32768 -#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH) - -#define HNS_ROCE_V1_SDB_DEPTH 0x400 -#define HNS_ROCE_V1_ODB_DEPTH 0x400 - -#define HNS_ROCE_V1_DB_RSVD 0x80 - -#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) - -#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000 -#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000 -#define HNS_ROCE_V1_EXT_SDB_ENTRY 16 -#define HNS_ROCE_V1_EXT_ODB_ENTRY 16 -#define HNS_ROCE_V1_EXT_SDB_SIZE \ - (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY) -#define HNS_ROCE_V1_EXT_ODB_SIZE \ - (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY) - -#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_EXT_SDB_ALFUL \ - (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_EXT_ODB_ALFUL \ - (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) - -#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000 -#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000 -#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5 -#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20 - -#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17) - -#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2 -#define HNS_ROCE_V1_TPTR_BUF_SIZE \ - (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM) - -#define HNS_ROCE_ODB_POLL_MODE 0 - -#define HNS_ROCE_SDB_NORMAL_MODE 0 -#define HNS_ROCE_SDB_EXTEND_MODE 1 - -#define HNS_ROCE_ODB_EXTEND_MODE 1 - -#define KEY_VALID 0x02 - -#define HNS_ROCE_CQE_QPN_MASK 0x3ffff -#define HNS_ROCE_CQE_STATUS_MASK 0x1f -#define HNS_ROCE_CQE_OPCODE_MASK 0xf - -#define HNS_ROCE_CQE_SUCCESS 0x00 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03 -#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04 -#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05 -#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06 -#define 
HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a -#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b -#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c - -#define QP1C_CFGN_OFFSET 0x28 -#define PHY_PORT_OFFSET 0x8 -#define MTPT_IDX_SHIFT 16 -#define ALL_PORT_VAL_OPEN 0x3f -#define POL_TIME_INTERVAL_VAL 0x80 -#define SLEEP_TIME_INTERVAL 20 -#define SQ_PSN_SHIFT 8 -#define QKEY_VAL 0x80010000 -#define SDB_INV_CNT_OFFSET 8 - -#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10 -#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10 - -#define HNS_ROCE_INT_MASK_DISABLE 0 -#define HNS_ROCE_INT_MASK_ENABLE 1 - -#define CEQ_REG_OFFSET 0x18 - -#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0 - -#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0) - -#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16 -#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16) - -#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16 -#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16) - -#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24 -#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24) - -#define HNS_ROCE_AEQE_U32_4_OWNER_S 31 - -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0 -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0) - -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25 -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25) - -#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0 -#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0) - -#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0 -#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0) - -/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */ -enum { - HNS_ROCE_LWQCE_QPC_ERROR = 1, - HNS_ROCE_LWQCE_MTU_ERROR, - HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR, - HNS_ROCE_LWQCE_WQE_ADDR_ERROR, - HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR, - HNS_ROCE_LWQCE_SL_ERROR, - HNS_ROCE_LWQCE_PORT_ERROR, -}; - -/* Local Access Violation Work Queue Error,SUBTYPE 0x7 */ -enum { - HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1, - HNS_ROCE_LAVWQE_LENGTH_ERROR, - HNS_ROCE_LAVWQE_VA_ERROR, - HNS_ROCE_LAVWQE_PD_ERROR, - HNS_ROCE_LAVWQE_RW_ACC_ERROR, - HNS_ROCE_LAVWQE_KEY_STATE_ERROR, - HNS_ROCE_LAVWQE_MR_OPERATION_ERROR, -}; - -/* DOORBELL overflow subtype */ -enum { - HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1, - HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF, - HNS_ROCE_DB_SUBTYPE_ODB_OVF, - HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF, - HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP, - HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP, -}; - -enum { - /* RQ&SRQ related operations */ - HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06, - HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE, -}; - -enum { - HNS_ROCE_PORT_DOWN = 0, - HNS_ROCE_PORT_UP, -}; - -struct hns_roce_cq_context { - __le32 cqc_byte_4; - __le32 cq_bt_l; - __le32 cqc_byte_12; - __le32 cur_cqe_ba0_l; - __le32 cqc_byte_20; - __le32 cqe_tptr_addr_l; - __le32 cur_cqe_ba1_l; - __le32 cqc_byte_32; -}; - -#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0 -#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \ - (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S) - -#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16 -#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \ - (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20 -#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \ - (((1UL << 4) - 1) << 
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24 -#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16 -#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8 -#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S) - -#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S) - -#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9 - -#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8 -#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14 -#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15 - -#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16 -#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S) - -struct hns_roce_cqe { - __le32 cqe_byte_4; - union { - __le32 r_key; - __le32 immediate_data; - }; - __le32 byte_cnt; - __le32 cqe_byte_16; - __le32 cqe_byte_20; - __le32 s_mac_l; - __le32 cqe_byte_28; - __le32 reserved; -}; - -#define CQE_BYTE_4_OWNER_S 7 -#define CQE_BYTE_4_SQ_RQ_FLAG_S 14 - -#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8 -#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \ - (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) - -#define CQE_BYTE_4_WQE_INDEX_S 16 -#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S) - -#define CQE_BYTE_4_OPERATION_TYPE_S 0 -#define CQE_BYTE_4_OPERATION_TYPE_M \ - (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S) - -#define CQE_BYTE_4_IMM_INDICATOR_S 15 - -#define CQE_BYTE_16_LOCAL_QPN_S 0 -#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S) - -#define CQE_BYTE_20_PORT_NUM_S 26 -#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S) - -#define CQE_BYTE_20_SL_S 24 -#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S) - -#define CQE_BYTE_20_REMOTE_QPN_S 0 -#define CQE_BYTE_20_REMOTE_QPN_M \ - (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S) - -#define CQE_BYTE_20_GRH_PRESENT_S 29 - -#define CQE_BYTE_28_P_KEY_IDX_S 16 -#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S) - -#define CQ_DB_REQ_NOT_SOL 0 -#define CQ_DB_REQ_NOT (1 << 16) - -struct hns_roce_v1_mpt_entry { - __le32 mpt_byte_4; - __le32 pbl_addr_l; - __le32 mpt_byte_12; - __le32 virt_addr_l; - __le32 virt_addr_h; - __le32 length; - __le32 mpt_byte_28; - __le32 pa0_l; - __le32 mpt_byte_36; - __le32 mpt_byte_40; - __le32 mpt_byte_44; - __le32 mpt_byte_48; - __le32 pa4_l; - __le32 mpt_byte_56; - __le32 mpt_byte_60; - __le32 mpt_byte_64; -}; - -#define MPT_BYTE_4_KEY_STATE_S 0 -#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S) - -#define MPT_BYTE_4_KEY_S 8 -#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S) - -#define MPT_BYTE_4_PAGE_SIZE_S 16 -#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S) - -#define MPT_BYTE_4_MW_TYPE_S 20 - -#define MPT_BYTE_4_MW_BIND_ENABLE_S 21 - -#define MPT_BYTE_4_OWN_S 22 - -#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24 -#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \ - (((1UL << 2) - 1) << 
MPT_BYTE_4_MEMORY_LOCATION_TYPE_S) - -#define MPT_BYTE_4_REMOTE_ATOMIC_S 26 -#define MPT_BYTE_4_LOCAL_WRITE_S 27 -#define MPT_BYTE_4_REMOTE_WRITE_S 28 -#define MPT_BYTE_4_REMOTE_READ_S 29 -#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30 -#define MPT_BYTE_4_ADDRESS_TYPE_S 31 - -#define MPT_BYTE_12_PBL_ADDR_H_S 0 -#define MPT_BYTE_12_PBL_ADDR_H_M \ - (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S) - -#define MPT_BYTE_12_MW_BIND_COUNTER_S 17 -#define MPT_BYTE_12_MW_BIND_COUNTER_M \ - (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S) - -#define MPT_BYTE_28_PD_S 0 -#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S) - -#define MPT_BYTE_28_L_KEY_IDX_L_S 16 -#define MPT_BYTE_28_L_KEY_IDX_L_M \ - (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S) - -#define MPT_BYTE_36_PA0_H_S 0 -#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S) - -#define MPT_BYTE_36_PA1_L_S 8 -#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S) - -#define MPT_BYTE_40_PA1_H_S 0 -#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S) - -#define MPT_BYTE_40_PA2_L_S 16 -#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S) - -#define MPT_BYTE_44_PA2_H_S 0 -#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S) - -#define MPT_BYTE_44_PA3_L_S 24 -#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S) - -#define MPT_BYTE_48_PA3_H_S 0 -#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S) - -#define MPT_BYTE_56_PA4_H_S 0 -#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S) - -#define MPT_BYTE_56_PA5_L_S 8 -#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S) - -#define MPT_BYTE_60_PA5_H_S 0 -#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S) - -#define MPT_BYTE_60_PA6_L_S 16 -#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S) - -#define MPT_BYTE_64_PA6_H_S 0 -#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S) - -#define MPT_BYTE_64_L_KEY_IDX_H_S 24 -#define MPT_BYTE_64_L_KEY_IDX_H_M \ - (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S) - -struct hns_roce_wqe_ctrl_seg { - __le32 sgl_pa_h; - __le32 flag; - union { - __be32 imm_data; - __le32 inv_key; - }; - __le32 msg_length; -}; - -struct hns_roce_wqe_data_seg { - __le64 addr; - __le32 lkey; - __le32 len; -}; - -struct hns_roce_wqe_raddr_seg { - __le32 rkey; - __le32 len; /* reserved */ - __le64 raddr; -}; - -struct hns_roce_rq_wqe_ctrl { - __le32 rwqe_byte_4; - __le32 rocee_sgl_ba_l; - __le32 rwqe_byte_12; - __le32 reserved[5]; -}; - -#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16 -#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \ - (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S) - -#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000 - -#define GID_LEN 16 - -struct hns_roce_ud_send_wqe { - __le32 dmac_h; - __le32 u32_8; - __le32 immediate_data; - - __le32 u32_16; - union { - unsigned char dgid[GID_LEN]; - struct { - __le32 u32_20; - __le32 u32_24; - __le32 u32_28; - __le32 u32_32; - }; - }; - - __le32 u32_36; - __le32 u32_40; - - __le32 va0_l; - __le32 va0_h; - __le32 l_key0; - - __le32 va1_l; - __le32 va1_h; - __le32 l_key1; -}; - -#define UD_SEND_WQE_U32_4_DMAC_0_S 0 -#define UD_SEND_WQE_U32_4_DMAC_0_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S) - -#define UD_SEND_WQE_U32_4_DMAC_1_S 8 -#define UD_SEND_WQE_U32_4_DMAC_1_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S) - -#define UD_SEND_WQE_U32_4_DMAC_2_S 16 -#define 
UD_SEND_WQE_U32_4_DMAC_2_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S) - -#define UD_SEND_WQE_U32_4_DMAC_3_S 24 -#define UD_SEND_WQE_U32_4_DMAC_3_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S) - -#define UD_SEND_WQE_U32_8_DMAC_4_S 0 -#define UD_SEND_WQE_U32_8_DMAC_4_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S) - -#define UD_SEND_WQE_U32_8_DMAC_5_S 8 -#define UD_SEND_WQE_U32_8_DMAC_5_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S) - -#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22 - -#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16 -#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \ - (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S) - -#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24 -#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \ - (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S) - -#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31 - -#define UD_SEND_WQE_U32_16_DEST_QP_S 0 -#define UD_SEND_WQE_U32_16_DEST_QP_M \ - (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S) - -#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24 -#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S) - -#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0 -#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \ - (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S) - -#define UD_SEND_WQE_U32_36_PRIORITY_S 20 -#define UD_SEND_WQE_U32_36_PRIORITY_M \ - (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S) - -#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24 -#define UD_SEND_WQE_U32_36_SGID_INDEX_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S) - -#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0 -#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S) - -#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8 -#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S) - -struct hns_roce_sqp_context { - __le32 qp1c_bytes_4; - __le32 sq_rq_bt_l; - __le32 qp1c_bytes_12; - __le32 qp1c_bytes_16; - __le32 qp1c_bytes_20; - __le32 cur_rq_wqe_ba_l; - __le32 qp1c_bytes_28; - __le32 qp1c_bytes_32; - __le32 cur_sq_wqe_ba_l; - __le32 qp1c_bytes_40; -}; - -#define QP1C_BYTES_4_QP_STATE_S 0 -#define QP1C_BYTES_4_QP_STATE_M \ - (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S) - -#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8 -#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S) - -#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12 -#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S) - -#define QP1C_BYTES_4_PD_S 16 -#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S) - -#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0 -#define QP1C_BYTES_12_SQ_RQ_BT_H_M \ - (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S) - -#define QP1C_BYTES_16_RQ_HEAD_S 0 -#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S) - -#define QP1C_BYTES_16_PORT_NUM_S 16 -#define QP1C_BYTES_16_PORT_NUM_M \ - (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S) - -#define QP1C_BYTES_16_SIGNALING_TYPE_S 27 -#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28 -#define QP1C_BYTES_16_RQ_BA_FLG_S 29 -#define QP1C_BYTES_16_SQ_BA_FLG_S 30 -#define QP1C_BYTES_16_QP1_ERR_S 31 - -#define QP1C_BYTES_20_SQ_HEAD_S 0 -#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S) - -#define QP1C_BYTES_20_PKEY_IDX_S 16 -#define QP1C_BYTES_20_PKEY_IDX_M \ - (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S) - -#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0 
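(Illustrative aside, not part of the patch: every _S/_M pair in this header encodes a bit-field as a shift plus a pre-shifted mask so a generic helper can pack values. A minimal standalone sketch of the read-modify-write that the v1 driver's roce_set_field() performs, minus its __le32 endianness handling; the 3-bit field at bit 16 mirrors QP1C_BYTES_16_PORT_NUM_S/_M above.)

#include <stdint.h>
#include <stdio.h>

#define FIELD_S 16				/* e.g. ..._PORT_NUM_S */
#define FIELD_M (((1UL << 3) - 1) << FIELD_S)	/* e.g. ..._PORT_NUM_M */

/* clear the field, then merge the shifted value under the mask */
static void set_field(uint32_t *reg, uint32_t mask, uint32_t shift,
		      uint32_t val)
{
	*reg = (*reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	set_field(&reg, FIELD_M, FIELD_S, 5);	/* port number 5 */
	printf("reg = 0x%08x\n", reg);		/* 0x00050000 */
	return 0;
}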
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S) - -#define QP1C_BYTES_28_RQ_CUR_IDX_S 16 -#define QP1C_BYTES_28_RQ_CUR_IDX_M \ - (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S) - -#define QP1C_BYTES_32_TX_CQ_NUM_S 0 -#define QP1C_BYTES_32_TX_CQ_NUM_M \ - (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S) - -#define QP1C_BYTES_32_RX_CQ_NUM_S 16 -#define QP1C_BYTES_32_RX_CQ_NUM_M \ - (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S) - -#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0 -#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S) - -#define QP1C_BYTES_40_SQ_CUR_IDX_S 16 -#define QP1C_BYTES_40_SQ_CUR_IDX_M \ - (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S) - -#define HNS_ROCE_WQE_INLINE (1UL<<31) -#define HNS_ROCE_WQE_SE (1UL<<30) - -#define HNS_ROCE_WQE_SGE_NUM_BIT 24 -#define HNS_ROCE_WQE_IMM (1UL<<23) -#define HNS_ROCE_WQE_FENCE (1UL<<21) -#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20) - -#define HNS_ROCE_WQE_OPCODE_SEND (0<<16) -#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16) -#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16) -#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16) -#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16) -#define HNS_ROCE_WQE_OPCODE_MASK (15<<16) - -struct hns_roce_qp_context { - __le32 qpc_bytes_4; - __le32 qpc_bytes_8; - __le32 qpc_bytes_12; - __le32 qpc_bytes_16; - __le32 sq_rq_bt_l; - __le32 qpc_bytes_24; - __le32 irrl_ba_l; - __le32 qpc_bytes_32; - __le32 qpc_bytes_36; - __le32 dmac_l; - __le32 qpc_bytes_44; - __le32 qpc_bytes_48; - u8 dgid[16]; - __le32 qpc_bytes_68; - __le32 cur_rq_wqe_ba_l; - __le32 qpc_bytes_76; - __le32 rx_rnr_time; - __le32 qpc_bytes_84; - __le32 qpc_bytes_88; - union { - __le32 rx_sge_len; - __le32 dma_length; - }; - union { - __le32 rx_sge_num; - __le32 rx_send_pktn; - __le32 r_key; - }; - __le32 va_l; - __le32 va_h; - __le32 qpc_bytes_108; - __le32 qpc_bytes_112; - __le32 rx_cur_sq_wqe_ba_l; - __le32 qpc_bytes_120; - __le32 qpc_bytes_124; - __le32 qpc_bytes_128; - __le32 qpc_bytes_132; - __le32 qpc_bytes_136; - __le32 qpc_bytes_140; - __le32 qpc_bytes_144; - __le32 qpc_bytes_148; - union { - __le32 rnr_retry; - __le32 ack_time; - }; - __le32 qpc_bytes_156; - __le32 pkt_use_len; - __le32 qpc_bytes_164; - __le32 qpc_bytes_168; - union { - __le32 sge_use_len; - __le32 pa_use_len; - }; - __le32 qpc_bytes_176; - __le32 qpc_bytes_180; - __le32 tx_cur_sq_wqe_ba_l; - __le32 qpc_bytes_188; - __le32 rvd21; -}; - -#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0 -#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S) - -#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3 -#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4 -#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5 -#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6 -#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7 - -#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8 -#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S) - -#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12 -#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S) - -#define QP_CONTEXT_QPC_BYTES_4_PD_S 16 -#define QP_CONTEXT_QPC_BYTES_4_PD_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S) - -#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0 -#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S) - 
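(Illustrative aside, not part of the patch: the HNS_ROCE_WQE_* constants above pack single-bit send flags around a 4-bit opcode at bit 16, so decoding a v1 WQE control word is a mask-and-compare. A minimal standalone sketch using the values defined above.)

#include <stdint.h>
#include <stdio.h>

#define HNS_ROCE_WQE_IMM		(1UL << 23)
#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE	(2 << 16)
#define HNS_ROCE_WQE_OPCODE_MASK	(15 << 16)

int main(void)
{
	uint32_t flag = HNS_ROCE_WQE_IMM | HNS_ROCE_WQE_OPCODE_RDMA_WRITE;

	/* the opcode occupies bits 16..19; flag bits sit above and below */
	if ((flag & HNS_ROCE_WQE_OPCODE_MASK) ==
	    HNS_ROCE_WQE_OPCODE_RDMA_WRITE)
		printf("RDMA write%s\n",
		       (flag & HNS_ROCE_WQE_IMM) ? " with immediate" : "");
	return 0;
}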
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16 -#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S) - -#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0 -#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S) - -#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0 -#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S) - -#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0 -#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \ - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S) - -#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18 -#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S) - -#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23 - -#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \ - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18 -#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S) - -#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20 -#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21 -#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22 -#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23 - -#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24 -#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S) - -#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0 -#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S) - -#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24 -#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0 -#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S) - -#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16 -#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S) - -#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24 -#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S) - -#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0 -#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \ - (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S) - -#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20 -#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S) - -#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28 -#define QP_CONTEXT_QPC_BYTES_48_MTU_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S) - -#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0 -#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S) - -#define 
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8 -#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24 -#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0 -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S) - -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24 -#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25 - -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26 -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \ - (((1UL << 2) - 1) << \ - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29 -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S) - -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24 -#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25 - -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24 -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S) - -#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0 -#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16 -#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S) - -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0 -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S) - -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24 - -#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25 -#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27 - -#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24 -#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S) - -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24 -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S) - -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0 -#define 
QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S) - -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16 -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31 - -#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0 -#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S) - -#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0 -#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S) - -#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2 -#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S) - -#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5 -#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S) - -#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8 -#define QP_CONTEXT_QPC_BYTES_148_LSN_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S) - -#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0 -#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S) - -#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3 -#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S) - -#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8 -#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S) - -#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11 -#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) - -#define QP_CONTEXT_QPC_BYTES_156_SL_S 14 -#define QP_CONTEXT_QPC_BYTES_156_SL_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S) - -#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16 -#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S) - -#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24 -#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S) - -#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24 -#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24 -#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S) - -#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26 -#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28 -#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29 -#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30 - -#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0 -#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16 -#define 
QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0 -#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8 - -#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S) - -#define STATUS_MASK 0xff -#define GO_BIT_TIMEOUT_MSECS 10000 -#define HCR_STATUS_OFFSET 0x18 -#define HCR_GO_BIT 15 - -struct hns_roce_rq_db { - __le32 u32_4; - __le32 u32_8; -}; - -#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0 -#define RQ_DOORBELL_U32_4_RQ_HEAD_M \ - (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S) - -#define RQ_DOORBELL_U32_8_QPN_S 0 -#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S) - -#define RQ_DOORBELL_U32_8_CMD_S 28 -#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S) - -#define RQ_DOORBELL_U32_8_HW_SYNC_S 31 - -struct hns_roce_sq_db { - __le32 u32_4; - __le32 u32_8; -}; - -#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0 -#define SQ_DOORBELL_U32_4_SQ_HEAD_M \ - (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S) - -#define SQ_DOORBELL_U32_4_SL_S 16 -#define SQ_DOORBELL_U32_4_SL_M \ - (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S) - -#define SQ_DOORBELL_U32_4_PORT_S 18 -#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S) - -#define SQ_DOORBELL_U32_8_QPN_S 0 -#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S) - -#define SQ_DOORBELL_HW_SYNC_S 31 - -struct hns_roce_ext_db { - int esdb_dep; - int eodb_dep; - struct hns_roce_buf_list *sdb_buf_list; - struct hns_roce_buf_list *odb_buf_list; -}; - -struct hns_roce_db_table { - int sdb_ext_mod; - int odb_ext_mod; - struct hns_roce_ext_db *ext_db; -}; - -#define HW_SYNC_SLEEP_TIME_INTERVAL 20 -#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL) -#define BT_CMD_SYNC_SHIFT 31 -#define HNS_ROCE_BA_SIZE (32 * 4096) - -struct hns_roce_bt_table { - struct hns_roce_buf_list qpc_buf; - struct hns_roce_buf_list mtpt_buf; - struct hns_roce_buf_list cqc_buf; -}; - -struct hns_roce_tptr_table { - struct hns_roce_buf_list tptr_buf; -}; - -struct hns_roce_qp_work { - struct work_struct work; - struct ib_device *ib_dev; - struct hns_roce_qp *qp; - u32 db_wait_stage; - u32 sdb_issue_ptr; - u32 sdb_inv_cnt; - u32 sche_cnt; -}; - -struct hns_roce_mr_free_work { - struct work_struct work; - struct ib_device *ib_dev; - struct completion *comp; - int comp_flag; - void *mr; -}; - -struct hns_roce_recreate_lp_qp_work { - struct work_struct work; - struct ib_device *ib_dev; - struct completion *comp; - int comp_flag; -}; - -struct hns_roce_free_mr { - struct workqueue_struct *free_mr_wq; - struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP]; - struct hns_roce_cq *mr_free_cq; - struct hns_roce_pd *mr_free_pd; -}; - -struct hns_roce_v1_priv { - struct hns_roce_db_table db_table; - struct hns_roce_raq_table raq_table; - struct hns_roce_bt_table bt_table; - struct hns_roce_tptr_table 
tptr_table; - struct hns_roce_free_mr free_mr; -}; - -int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); -int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); -int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); - -#endif diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index eb0defa80d0d..1435fe2ea176 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -55,6 +55,42 @@ enum { CMD_RST_PRC_EBUSY, }; +enum ecc_resource_type { + ECC_RESOURCE_QPC, + ECC_RESOURCE_CQC, + ECC_RESOURCE_MPT, + ECC_RESOURCE_SRQC, + ECC_RESOURCE_GMV, + ECC_RESOURCE_QPC_TIMER, + ECC_RESOURCE_CQC_TIMER, + ECC_RESOURCE_SCCC, + ECC_RESOURCE_COUNT, +}; + +static const struct { + const char *name; + u8 read_bt0_op; + u8 write_bt0_op; +} fmea_ram_res[] = { + { "ECC_RESOURCE_QPC", + HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 }, + { "ECC_RESOURCE_CQC", + HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 }, + { "ECC_RESOURCE_MPT", + HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 }, + { "ECC_RESOURCE_SRQC", + HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 }, + /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */ + { "ECC_RESOURCE_GMV", + 0, 0 }, + { "ECC_RESOURCE_QPC_TIMER", + HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 }, + { "ECC_RESOURCE_CQC_TIMER", + HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 }, + { "ECC_RESOURCE_SCCC", + HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 }, +}; + static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, struct ib_sge *sg) { @@ -82,7 +118,6 @@ static const u32 hns_roce_op_code[] = { HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP), HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD), HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV), - HR_OPC_MAP(LOCAL_INV, LOCAL_INV), HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP), HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD), HR_OPC_MAP(REG_MR, FAST_REG_PMR), @@ -149,8 +184,7 @@ static void set_atomic_seg(const struct ib_send_wr *wr, aseg->cmp_data = 0; } - roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); } static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, @@ -158,8 +192,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, unsigned int *sge_idx, u32 msg_len) { struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev; - unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg); - unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len; + unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE; unsigned int left_len_in_pg; unsigned int idx = *sge_idx; unsigned int i = 0; @@ -187,7 +220,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, if (len <= left_len_in_pg) { memcpy(dseg, addr, len); - idx += len / dseg_len; + idx += len / HNS_ROCE_SGE_SIZE; i++; if (i >= wr->num_sge) @@ -202,7 +235,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, len -= left_len_in_pg; addr += left_len_in_pg; - idx += left_len_in_pg / dseg_len; + idx += left_len_in_pg / HNS_ROCE_SGE_SIZE; dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1)); left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT; @@ -271,8 +304,7 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr, dseg += 
sizeof(struct hns_roce_v2_rc_send_wqe); if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) { - roce_set_bit(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0); + hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE); for (i = 0; i < wr->num_sge; i++) { memcpy(dseg, ((void *)wr->sg_list[i].addr), @@ -280,17 +312,13 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr, dseg += wr->sg_list[i].length; } } else { - roce_set_bit(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE); ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len); if (ret) return ret; - roce_set_field(rc_sq_wqe->byte_16, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, - curr_idx - *sge_idx); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx); } *sge_idx = curr_idx; @@ -309,12 +337,10 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, int j = 0; int i; - roce_set_field(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - (*sge_ind) & (qp->sge.sge_cnt - 1)); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, + (*sge_ind) & (qp->sge.sge_cnt - 1)); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE, !!(wr->send_flags & IB_SEND_INLINE)); if (wr->send_flags & IB_SEND_INLINE) return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind); @@ -339,9 +365,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, valid_num_sge - HNS_ROCE_SGE_IN_WQE); } - roce_set_field(rc_sq_wqe->byte_16, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); return 0; } @@ -355,7 +379,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev, if (unlikely(ibqp->qp_type != IB_QPT_RC && ibqp->qp_type != IB_QPT_GSI && ibqp->qp_type != IB_QPT_UD)) { - ibdev_err(ibdev, "Not supported QP(0x%x)type!\n", + ibdev_err(ibdev, "not supported QP(0x%x)type!\n", ibqp->qp_type); return -EOPNOTSUPP; } else if (unlikely(hr_qp->state == IB_QPS_RESET || @@ -412,8 +436,7 @@ static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, ud_sq_wqe->immtdata = get_immtdata(wr); - roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M, - V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op)); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op)); return 0; } @@ -424,21 +447,15 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, struct ib_device *ib_dev = ah->ibah.device; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); - roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M, - V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport); - - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, - V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit); - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M, - V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass); - roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, - V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel); if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL)) return -EINVAL; - 
roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M, - V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl); ud_sq_wqe->sgid_index = ah->av.gid_index; @@ -448,10 +465,8 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) return 0; - roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, - ah->av.vlan_en); - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M, - V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id); return 0; } @@ -476,27 +491,19 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, ud_sq_wqe->msg_len = cpu_to_le32(msg_len); - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S, + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE, !!(wr->send_flags & IB_SEND_SIGNALED)); - - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S, + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE, !!(wr->send_flags & IB_SEND_SOLICITED)); - roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M, - V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn); - - roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); - - roce_set_field(ud_sq_wqe->byte_20, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - curr_idx & (qp->sge.sge_cnt - 1)); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX, + curr_idx & (qp->sge.sge_cnt - 1)); ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ? qp->qkey : ud_wr(wr)->remote_qkey); - roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M, - V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn); ret = fill_ud_av(ud_sq_wqe, ah); if (ret) @@ -516,8 +523,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, dma_wmb(); *sge_idx = curr_idx; - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S, - owner_bit); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit); return 0; } @@ -552,9 +558,6 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev, else ret = -EOPNOTSUPP; break; - case IB_WR_LOCAL_INV: - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1); - fallthrough; case IB_WR_SEND_WITH_INV: rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); break; @@ -565,11 +568,11 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev, if (unlikely(ret)) return ret; - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op)); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op)); return ret; } + static inline int set_rc_wqe(struct hns_roce_qp *qp, const struct ib_send_wr *wr, void *wqe, unsigned int *sge_idx, @@ -590,13 +593,13 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, if (WARN_ON(ret)) return ret; - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE, (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE, (wr->send_flags & IB_SEND_SOLICITED) ? 
1 : 0); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE, (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0); if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || @@ -616,8 +619,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, dma_wmb(); *sge_idx = curr_idx; - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S, - owner_bit); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit); return ret; } @@ -630,7 +632,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev, } else { struct hns_roce_v2_db sq_db = {}; - hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn); + hr_reg_write(&sq_db, DB_TAG, qp->qpn); hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB); hr_reg_write(&sq_db, DB_PI, qp->sq.head); hr_reg_write(&sq_db, DB_SL, qp->sl); @@ -678,16 +680,15 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val, static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, void *wqe) { +#define HNS_ROCE_SL_SHIFT 2 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; /* All kinds of DirectWQE have the same header field layout */ - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M, - V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H, + qp->sl >> HNS_ROCE_SL_SHIFT); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head); hns_roce_write512(hr_dev, wqe, qp->sq.db_reg); } @@ -1263,6 +1264,16 @@ static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev) return tail == priv->cmq.csq.head; } +static void update_cmdq_status(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + + if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT || + handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) + hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR; +} + static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { @@ -1294,7 +1305,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, } while (++timeout < priv->cmq.tx_timeout); if (hns_roce_cmq_csq_done(hr_dev)) { - for (ret = 0, i = 0; i < num; i++) { + ret = 0; + for (i = 0; i < num; i++) { /* check the result of hardware write back */ desc[i] = csq->desc[tail++]; if (tail == csq->desc_num) @@ -1305,17 +1317,19 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, continue; dev_err_ratelimited(hr_dev->dev, - "Cmdq IO error, opcode = %x, return = %x\n", + "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", desc->opcode, desc_ret); ret = -EIO; } } else { /* FW/HW reset or incorrect number of desc */ tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG); - dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n", + dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n", csq->head, tail); csq->head = tail; + update_cmdq_status(hr_dev); + ret = -EAGAIN; } @@ -1330,6 +1344,9 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, bool busy; int ret; + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return -EIO; + if (!v2_chk_mbox_is_avail(hr_dev, &busy)) return busy ? 
-EBUSY : 0; @@ -1342,17 +1359,17 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, return ret; } -static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, - dma_addr_t base_addr, u16 op) +static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, + dma_addr_t base_addr, u8 cmd, unsigned long tag) { - struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev); + struct hns_roce_cmd_mailbox *mbox; int ret; + mbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mbox)) return PTR_ERR(mbox); - ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag); hns_roce_free_cmd_mailbox(hr_dev, mbox); return ret; } @@ -1384,20 +1401,20 @@ static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev, hr_dev->dis_db = true; dev_warn(hr_dev->dev, - "Func clear is pending, device in resetting state.\n"); + "func clear is pending, device in resetting state.\n"); end = HNS_ROCE_V2_HW_RST_TIMEOUT; while (end) { if (!ops->get_hw_reset_stat(handle)) { hr_dev->is_reset = true; dev_info(hr_dev->dev, - "Func clear success after reset.\n"); + "func clear success after reset.\n"); return; } msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; } - dev_warn(hr_dev->dev, "Func clear failed.\n"); + dev_warn(hr_dev->dev, "func clear failed.\n"); } static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev, @@ -1409,21 +1426,21 @@ static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev, hr_dev->dis_db = true; dev_warn(hr_dev->dev, - "Func clear is pending, device in resetting state.\n"); + "func clear is pending, device in resetting state.\n"); end = HNS_ROCE_V2_HW_RST_TIMEOUT; while (end) { if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) { hr_dev->is_reset = true; dev_info(hr_dev->dev, - "Func clear success after sw reset\n"); + "func clear success after sw reset\n"); return; } msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; } - dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n"); + dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n"); } static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval, @@ -1436,7 +1453,7 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval, if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) { hr_dev->dis_db = true; hr_dev->is_reset = true; - dev_info(hr_dev->dev, "Func clear success after reset.\n"); + dev_info(hr_dev->dev, "func clear success after reset.\n"); return; } @@ -1453,9 +1470,9 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval, if (retval && !flag) dev_warn(hr_dev->dev, - "Func clear read failed, ret = %d.\n", retval); + "func clear read failed, ret = %d.\n", retval); - dev_warn(hr_dev->dev, "Func clear failed.\n"); + dev_warn(hr_dev->dev, "func clear failed.\n"); } static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id) @@ -1476,7 +1493,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id) ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) { fclr_write_fail_flag = true; - dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n", + dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n", ret); goto out; } @@ -1497,7 +1514,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id) if (ret) continue; - if 
(roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) { + if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) { if (vf_id == 0) hr_dev->is_reset = true; return; @@ -1508,7 +1525,7 @@ out: hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag); } -static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) +static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) { enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES; struct hns_roce_cmq_desc desc[2]; @@ -1519,17 +1536,29 @@ static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false); hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id); - hns_roce_cmq_send(hr_dev, desc, 2); + + return hns_roce_cmq_send(hr_dev, desc, 2); } static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) { + int ret; int i; + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return; + for (i = hr_dev->func_num - 1; i >= 0; i--) { __hns_roce_function_clear(hr_dev, i); - if (i != 0) - hns_roce_free_vf_resource(hr_dev, i); + + if (i == 0) + continue; + + ret = hns_roce_free_vf_resource(hr_dev, i); + if (ret) + ibdev_err(&hr_dev->ib_dev, + "failed to free vf resource, vf_id = %d, ret = %d.\n", + i, ret); } } @@ -1571,7 +1600,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev) struct hns_roce_cmq_desc desc; int ret; - if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { hr_dev->func_num = 1; return 0; } @@ -1755,17 +1784,16 @@ static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, swt = (struct hns_roce_vf_switch *)desc.data; hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); - roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M, - VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id); + hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id); ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) return ret; desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN); desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1); + hr_reg_enable(swt, VF_SWITCH_ALW_LPBK); + hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK); + hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD); return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -1933,7 +1961,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM; caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM; - caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM; caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM; caps->num_uars = HNS_ROCE_V2_UAR_NUM; @@ -1944,14 +1971,13 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM; caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM; - caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM; - caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM; + caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM; + caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM; caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA; caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA; caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ; caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ; - caps->max_srq_desc_sz = 
HNS_ROCE_V2_MAX_SRQ_DESC_SZ; caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ; caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ; caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ; @@ -2003,7 +2029,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { - caps->flags |= HNS_ROCE_CAP_FLAG_STASH; + caps->flags |= HNS_ROCE_CAP_FLAG_STASH | + HNS_ROCE_CAP_FLAG_DIRECT_WQE; caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; } else { caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; @@ -2144,7 +2171,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; - caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM; caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; @@ -2152,15 +2178,17 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM; caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM; - caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS; caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS; caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS; if (!caps->num_comp_vectors) - caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1, - (u32)priv->handle->rinfo.num_vectors - 2); + caps->num_comp_vectors = + min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM, + (u32)priv->handle->rinfo.num_vectors - + (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM)); if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { + caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM; caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE; caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE; @@ -2181,6 +2209,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) } else { u32 func_num = max_t(u32, 1, hr_dev->func_num); + caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM; caps->ceqe_size = HNS_ROCE_CEQE_SIZE; caps->aeqe_size = HNS_ROCE_AEQE_SIZE; caps->gid_table_len[0] /= func_num; @@ -2237,16 +2266,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); - caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg); - caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer); - caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer); caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); caps->num_aeq_vectors = resp_a->num_aeq_vectors; caps->num_other_vectors = resp_a->num_other_vectors; caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; - caps->max_srq_desc_sz = resp_a->max_srq_desc_sz; caps->cqe_sz = resp_a->cqe_sz; caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; @@ -2266,87 +2291,39 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) ctx_hop_num = resp_b->ctx_hop_num; pbl_hop_num = resp_b->pbl_hop_num; - caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds, - V2_QUERY_PF_CAPS_C_NUM_PDS_M, - V2_QUERY_PF_CAPS_C_NUM_PDS_S); - caps->flags = roce_get_field(resp_c->cap_flags_num_pds, - V2_QUERY_PF_CAPS_C_CAP_FLAGS_M, - V2_QUERY_PF_CAPS_C_CAP_FLAGS_S); + caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS); + + caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS); caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) << 
HNS_ROCE_CAP_FLAGS_EX_SHIFT; - caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs, - V2_QUERY_PF_CAPS_C_NUM_CQS_M, - V2_QUERY_PF_CAPS_C_NUM_CQS_S); - caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs, - V2_QUERY_PF_CAPS_C_MAX_GID_M, - V2_QUERY_PF_CAPS_C_MAX_GID_S); - - caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth, - V2_QUERY_PF_CAPS_C_CQ_DEPTH_M, - V2_QUERY_PF_CAPS_C_CQ_DEPTH_S); - caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws, - V2_QUERY_PF_CAPS_C_NUM_MRWS_M, - V2_QUERY_PF_CAPS_C_NUM_MRWS_S); - caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps, - V2_QUERY_PF_CAPS_C_NUM_QPS_M, - V2_QUERY_PF_CAPS_C_NUM_QPS_S); - caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps, - V2_QUERY_PF_CAPS_C_MAX_ORD_M, - V2_QUERY_PF_CAPS_C_MAX_ORD_S); + caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS); + caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID); + caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH); + caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS); + caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS); + caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD); caps->max_qp_dest_rdma = caps->max_qp_init_rdma; caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); - caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_NUM_SRQS_M, - V2_QUERY_PF_CAPS_D_NUM_SRQS_S); - caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_CONG_TYPE_M, - V2_QUERY_PF_CAPS_D_CONG_TYPE_S); - caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); - caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth, - V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M, - V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S); - caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth, - V2_QUERY_PF_CAPS_D_NUM_CEQS_M, - V2_QUERY_PF_CAPS_D_NUM_CEQS_S); - - caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M, - V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S); - caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M, - V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S); - caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M, - V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S); - caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds, - V2_QUERY_PF_CAPS_D_RSV_PDS_M, - V2_QUERY_PF_CAPS_D_RSV_PDS_S); - caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds, - V2_QUERY_PF_CAPS_D_NUM_UARS_M, - V2_QUERY_PF_CAPS_D_NUM_UARS_S); - caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps, - V2_QUERY_PF_CAPS_D_RSV_QPS_M, - V2_QUERY_PF_CAPS_D_RSV_QPS_S); - caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps, - V2_QUERY_PF_CAPS_D_RSV_UARS_M, - V2_QUERY_PF_CAPS_D_RSV_UARS_S); - caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws, - V2_QUERY_PF_CAPS_E_RSV_MRWS_M, - V2_QUERY_PF_CAPS_E_RSV_MRWS_S); - caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws, - V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M, - V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S); - caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs, - V2_QUERY_PF_CAPS_E_RSV_CQS_M, - V2_QUERY_PF_CAPS_E_RSV_CQS_S); - caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs, - V2_QUERY_PF_CAPS_E_RSV_SRQS_M, - V2_QUERY_PF_CAPS_E_RSV_SRQS_S); - caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey, - V2_QUERY_PF_CAPS_E_RSV_LKEYS_M, - V2_QUERY_PF_CAPS_E_RSV_LKEYS_S); + caps->num_srqs = 1 << hr_reg_read(resp_d, 
PF_CAPS_D_NUM_SRQS); + caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE); + caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); + caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); + caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); + caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); + caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST); + caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST); + caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); + caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); + caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); + caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS); + + caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS); + caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT); + caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS); + caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS); + caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS); caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); @@ -2361,15 +2338,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->cqe_hop_num = pbl_hop_num; caps->srqwqe_hop_num = pbl_hop_num; caps->idx_hop_num = pbl_hop_num; - caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S); - caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S); - caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S); + caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM); + caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM); + caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM); return 0; } @@ -2393,7 +2364,7 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev) struct hns_roce_caps *caps = &hr_dev->caps; int ret; - if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) return 0; ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE, @@ -2660,6 +2631,198 @@ static void free_dip_list(struct hns_roce_dev *hr_dev) spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); } +static void free_mr_exit(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { + if (free_mr->rsv_qp[i]) { + ret = ib_destroy_qp(free_mr->rsv_qp[i]); + if (ret) + ibdev_err(&hr_dev->ib_dev, + "failed to destroy qp in free mr.\n"); + + free_mr->rsv_qp[i] = NULL; + } + } + + if (free_mr->rsv_cq) { + ib_destroy_cq(free_mr->rsv_cq); + free_mr->rsv_cq = NULL; + } + + if (free_mr->rsv_pd) { + ib_dealloc_pd(free_mr->rsv_pd); + free_mr->rsv_pd = NULL; + } +} + +static int free_mr_alloc_res(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct ib_cq_init_attr cq_init_attr = {}; + struct ib_qp_init_attr qp_init_attr = {}; + struct ib_pd *pd; + struct ib_cq *cq; + struct ib_qp 
*qp; + int ret; + int i; + + pd = ib_alloc_pd(ibdev, 0); + if (IS_ERR(pd)) { + ibdev_err(ibdev, "failed to create pd for free mr.\n"); + return PTR_ERR(pd); + } + free_mr->rsv_pd = pd; + + cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM; + cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr); + if (IS_ERR(cq)) { + ibdev_err(ibdev, "failed to create cq for free mr.\n"); + ret = PTR_ERR(cq); + goto create_failed; + } + free_mr->rsv_cq = cq; + + qp_init_attr.qp_type = IB_QPT_RC; + qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; + qp_init_attr.send_cq = free_mr->rsv_cq; + qp_init_attr.recv_cq = free_mr->rsv_cq; + for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { + qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM; + qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM; + qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM; + qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM; + + qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr); + if (IS_ERR(qp)) { + ibdev_err(ibdev, "failed to create qp for free mr.\n"); + ret = PTR_ERR(qp); + goto create_failed; + } + + free_mr->rsv_qp[i] = qp; + } + + return 0; + +create_failed: + free_mr_exit(hr_dev); + + return ret; +} + +static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, + struct ib_qp_attr *attr, int sl_num) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_qp *hr_qp; + int loopback; + int mask; + int ret; + + hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]); + hr_qp->free_mr_en = 1; + + mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS; + attr->qp_state = IB_QPS_INIT; + attr->port_num = 1; + attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; + ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); + if (ret) { + ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n", + ret); + return ret; + } + + loopback = hr_dev->loop_idc; + /* Set qpc lbi = 1 to indicate loopback IO */ + hr_dev->loop_idc = 1; + + mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | + IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; + attr->qp_state = IB_QPS_RTR; + attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; + attr->path_mtu = IB_MTU_256; + attr->dest_qp_num = hr_qp->qpn; + attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN; + + rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num); + + ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); + hr_dev->loop_idc = loopback; + if (ret) { + ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n", + ret); + return ret; + } + + mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT | + IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC; + attr->qp_state = IB_QPS_RTS; + attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN; + attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT; + attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT; + ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); + if (ret) + ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n", + ret); + + return ret; +} + +static int free_mr_modify_qp(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_qp_attr attr = {}; + int ret; + int i; + + rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); + rdma_ah_set_static_rate(&attr.ah_attr, 3); + rdma_ah_set_port_num(&attr.ah_attr, 1); + + for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { + ret = free_mr_modify_rsv_qp(hr_dev, &attr, i); + if (ret) + return ret; + } + +
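+ /* + * Each reserved QP has now been driven through the standard verbs + * state ladder (RESET -> INIT -> RTR -> RTS) by free_mr_modify_rsv_qp(), + * with loop_idc temporarily forced to 1, so it can carry the loopback + * RDMA_WRITE that HIP08 issues when a user MR is deregistered. + */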
return 0; +} + +static int free_mr_init(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + int ret; + + mutex_init(&free_mr->mutex); + + ret = free_mr_alloc_res(hr_dev); + if (ret) + return ret; + + ret = free_mr_modify_qp(hr_dev); + if (ret) + goto err_modify_qp; + + return 0; + +err_modify_qp: + free_mr_exit(hr_dev); + + return ret; +} + static int get_hem_table(struct hns_roce_dev *hr_dev) { unsigned int qpc_count; @@ -2776,21 +2939,21 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) free_dip_list(hr_dev); } -static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, - u16 op, u16 token, int event) +static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { struct hns_roce_cmq_desc desc; struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); - mb->in_param_l = cpu_to_le32(in_param); - mb->in_param_h = cpu_to_le32(in_param >> 32); - mb->out_param_l = cpu_to_le32(out_param); - mb->out_param_h = cpu_to_le32(out_param >> 32); - mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op); - mb->token_event_en = cpu_to_le32(event << 16 | token); + mb->in_param_l = cpu_to_le32(mbox_msg->in_param); + mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32); + mb->out_param_l = cpu_to_le32(mbox_msg->out_param); + mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32); + mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd); + mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 | + mbox_msg->token); return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -2808,6 +2971,9 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, mb_st = (struct hns_roce_mbox_status *)desc.data; end = msecs_to_jiffies(timeout) + jiffies; while (v2_chk_mbox_is_avail(hr_dev, &busy)) { + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return -EIO; + status = 0; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true); @@ -2843,9 +3009,8 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, return ret; } -static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, - u16 op, u16 token, int event) +static int v2_post_mbox(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { u8 status = 0; int ret; @@ -2861,8 +3026,7 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, } /* Post new message to mbox */ - ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, token, event); + ret = hns_roce_mbox_post(hr_dev, mbox_msg); if (ret) dev_err_ratelimited(hr_dev->dev, "failed to post mailbox, ret = %d.\n", ret); @@ -2870,12 +3034,13 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, return ret; } -static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout) +static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev) { u8 status = 0; int ret; - ret = v2_wait_mbox_complete(hr_dev, timeout, &status); + ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS, + &status); if (!ret) { if (status != MB_ST_COMPLETE_SUCC) return -EBUSY; @@ -2912,10 +3077,8 @@ static int config_sgid_table(struct hns_roce_dev *hr_dev, hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false); - roce_set_field(sgid_tb->table_idx_rsv, 
CFG_SGID_TB_TABLE_IDX_M, - CFG_SGID_TB_TABLE_IDX_S, gid_index); - roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M, - CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type); + hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index); + hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type); copy_gid(&sgid_tb->vf_sgid_l, gid); @@ -2950,25 +3113,20 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev, copy_gid(&tb_a->vf_sgid_l, gid); - roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M, - CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type); - roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S, - vlan_id < VLAN_CFI_MASK); - roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M, - CFG_GMV_TB_VF_VLAN_ID_S, vlan_id); + hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type); + hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK); + hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id); tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac); - roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M, - CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]); - roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M, - CFG_GMV_TB_SGID_IDX_S, gid_index); + hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]); + hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index); return hns_roce_cmq_send(hr_dev, desc, 2); } -static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port, - int gid_index, const union ib_gid *gid, +static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index, + const union ib_gid *gid, const struct ib_gid_attr *attr) { enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1; @@ -3011,10 +3169,8 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, reg_smac_l = *(u32 *)(&addr[0]); reg_smac_h = *(u16 *)(&addr[4]); - roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M, - CFG_SMAC_TB_IDX_S, phy_port); - roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M, - CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h); + hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port); + hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h); smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); return hns_roce_cmq_send(hr_dev, &desc, 1); @@ -3043,38 +3199,29 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); - roce_set_field(mpt_entry->byte_48_mode_ba, - V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(pbl_ba >> 3)); + hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); - roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M, - V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0])); + hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0])); mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M, - V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1])); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); return 0; } static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, - void *mb_buf, struct hns_roce_mr *mr, - unsigned long mtpt_idx) + void *mb_buf, struct hns_roce_mr *mr) { struct hns_roce_v2_mpt_entry *mpt_entry; - int ret; 
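/* * The hr_reg_write()/hr_reg_read()/hr_reg_enable() helpers used below * take a single field definition that encodes both the bit range and * the dword holding it, replacing the old paired * roce_set_field()/roce_get_field() _S/_M macros. A write is * conceptually (sketch only; the real helpers live in * hns_roce_common.h): * * dword &= ~cpu_to_le32(field_mask); * dword |= cpu_to_le32(FIELD_PREP(field_mask, val)); */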
mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); hr_reg_write(mpt_entry, MPT_PD, mr->pd); - hr_reg_enable(mpt_entry, MPT_L_INV_EN); hr_reg_write_bool(mpt_entry, MPT_BIND_EN, mr->access & IB_ACCESS_MW_BIND); @@ -3106,9 +3253,7 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD); - ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); - - return ret; + return set_mtpt_pbl(hr_dev, mpt_entry, mr); } static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, @@ -3119,24 +3264,19 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, u32 mr_access_flags = mr->access; int ret = 0; - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID); - - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mr->pd); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); + hr_reg_write(mpt_entry, MPT_PD, mr->pd); if (flags & IB_MR_REREG_ACCESS) { - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_BIND_EN_S, + hr_reg_write(mpt_entry, MPT_BIND_EN, (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0)); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_ATOMIC_EN_S, + hr_reg_write(mpt_entry, MPT_ATOMIC_EN, mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, + hr_reg_write(mpt_entry, MPT_RR_EN, mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, + hr_reg_write(mpt_entry, MPT_RW_EN, mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, + hr_reg_write(mpt_entry, MPT_LW_EN, mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 
1 : 0); } @@ -3167,37 +3307,27 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, return -ENOBUFS; } - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, - V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1); - roce_set_field(mpt_entry->byte_4_pd_hop_st, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mr->pd); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); + hr_reg_write(mpt_entry, MPT_PD, mr->pd); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + hr_reg_enable(mpt_entry, MPT_RA_EN); + hr_reg_enable(mpt_entry, MPT_R_INV_EN); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); + hr_reg_enable(mpt_entry, MPT_FRE); + hr_reg_clear(mpt_entry, MPT_MR_MW); + hr_reg_enable(mpt_entry, MPT_BPD); + hr_reg_clear(mpt_entry, MPT_PA); + + hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1); + hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3)); - roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M, - V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(pbl_ba >> 3)); - - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); return 0; } @@ -3209,39 +3339,123 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mw->pdn); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, - V2_MPT_BYTE_4_PBL_HOP_NUM_S, - mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : - mw->pbl_hop_num); - roce_set_field(mpt_entry->byte_4_pd_hop_st, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, - mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); - - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1); - - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S, - mw->ibmw.type == IB_MW_TYPE_1 ? 
0 : 1); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); + hr_reg_write(mpt_entry, MPT_PD, mw->pdn); - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + hr_reg_enable(mpt_entry, MPT_R_INV_EN); + hr_reg_enable(mpt_entry, MPT_LW_EN); + + hr_reg_enable(mpt_entry, MPT_MR_MW); + hr_reg_enable(mpt_entry, MPT_BPD); + hr_reg_clear(mpt_entry, MPT_PA); + hr_reg_write(mpt_entry, MPT_BQP, + mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); mpt_entry->lkey = cpu_to_le32(mw->rkey); + hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, + mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : + mw->pbl_hop_num); + hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, + mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + return 0; } +static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); + struct ib_device *ibdev = &hr_dev->ib_dev; + const struct ib_send_wr *bad_wr; + struct ib_rdma_wr rdma_wr = {}; + struct ib_send_wr *send_wr; + int ret; + + send_wr = &rdma_wr.wr; + send_wr->opcode = IB_WR_RDMA_WRITE; + + ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr); + if (ret) { + ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, + struct ib_wc *wc); + +static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)]; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_qp *hr_qp; + unsigned long end; + int cqe_cnt = 0; + int npolled; + int ret; + int i; + + /* + * If the device initialization is not complete or in the uninstall + * process, then there is no need to execute free mr. 
+ */ + if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT || + priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT || + hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) + return; + + mutex_lock(&free_mr->mutex); + + for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { + hr_qp = to_hr_qp(free_mr->rsv_qp[i]); + + ret = free_mr_post_send_lp_wqe(hr_qp); + if (ret) { + ibdev_err(ibdev, + "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n", + hr_qp->qpn, ret); + break; + } + + cqe_cnt++; + } + + end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies; + while (cqe_cnt) { + npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc); + if (npolled < 0) { + ibdev_err(ibdev, + "failed to poll cqe for free mr, remain %d cqe.\n", + cqe_cnt); + goto out; + } + + if (time_after(jiffies, end)) { + ibdev_err(ibdev, + "failed to poll cqe for free mr and timeout, remain %d cqe.\n", + cqe_cnt); + goto out; + } + cqe_cnt -= npolled; + } + +out: + mutex_unlock(&free_mr->mutex); +} + +static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev) +{ + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + free_mr_send_cmd_to_hw(hr_dev); +} + static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) { return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size); @@ -3577,7 +3791,6 @@ static const u32 wc_send_op_map[] = { HR_WC_OP_MAP(RDMA_READ, RDMA_READ), HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE), HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE), - HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV), HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP), HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD), HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP), @@ -3627,9 +3840,6 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM: wc->wc_flags |= IB_WC_WITH_IMM; break; - case HNS_ROCE_V2_WQE_OP_LOCAL_INV: - wc->wc_flags |= IB_WC_WITH_INVALIDATE; - break; case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP: case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD: case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP: @@ -3815,38 +4025,38 @@ out: } static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, - int step_idx, u16 *mbox_op) + u32 step_idx, u8 *mbox_cmd) { - u16 op; + u8 cmd; switch (type) { case HEM_TYPE_QPC: - op = HNS_ROCE_CMD_WRITE_QPC_BT0; + cmd = HNS_ROCE_CMD_WRITE_QPC_BT0; break; case HEM_TYPE_MTPT: - op = HNS_ROCE_CMD_WRITE_MPT_BT0; + cmd = HNS_ROCE_CMD_WRITE_MPT_BT0; break; case HEM_TYPE_CQC: - op = HNS_ROCE_CMD_WRITE_CQC_BT0; + cmd = HNS_ROCE_CMD_WRITE_CQC_BT0; break; case HEM_TYPE_SRQC: - op = HNS_ROCE_CMD_WRITE_SRQC_BT0; + cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0; break; case HEM_TYPE_SCCC: - op = HNS_ROCE_CMD_WRITE_SCCC_BT0; + cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0; break; case HEM_TYPE_QPC_TIMER: - op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; + cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; break; case HEM_TYPE_CQC_TIMER: - op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; + cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; break; default: dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type); return -EINVAL; } - *mbox_op = op + step_idx; + *mbox_cmd = cmd + step_idx; return 0; } @@ -3869,10 +4079,10 @@ static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, } static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, - dma_addr_t base_addr, u32 hem_type, int step_idx) + dma_addr_t base_addr, u32 hem_type, u32 step_idx) { int ret; - u16 op; + u8 cmd; if (unlikely(hem_type == HEM_TYPE_GMV)) return config_gmv_ba_to_hw(hr_dev, obj, base_addr); @@ -3880,16 +4090,16 @@ 
static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx)) return 0; - ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op); + ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd); if (ret < 0) return ret; - return config_hem_ba_to_hw(hr_dev, obj, base_addr, op); + return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj); } static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, - int step_idx) + u32 step_idx) { struct hns_roce_hem_iter iter; struct hns_roce_hem_mhop mhop; @@ -3947,29 +4157,29 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, } static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) + struct hns_roce_hem_table *table, + int tag, u32 step_idx) { - struct device *dev = hr_dev->dev; struct hns_roce_cmd_mailbox *mailbox; + struct device *dev = hr_dev->dev; + u8 cmd = 0xff; int ret; - u16 op = 0xff; if (!hns_roce_check_whether_mhop(hr_dev, table->type)) return 0; switch (table->type) { case HEM_TYPE_QPC: - op = HNS_ROCE_CMD_DESTROY_QPC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0; break; case HEM_TYPE_MTPT: - op = HNS_ROCE_CMD_DESTROY_MPT_BT0; + cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0; break; case HEM_TYPE_CQC: - op = HNS_ROCE_CMD_DESTROY_CQC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0; break; case HEM_TYPE_SRQC: - op = HNS_ROCE_CMD_DESTROY_SRQC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0; break; case HEM_TYPE_SCCC: case HEM_TYPE_QPC_TIMER: @@ -3982,15 +4192,13 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, return 0; } - op += step_idx; + cmd += step_idx; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - /* configure the tag and op */ - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag); hns_roce_free_cmd_mailbox(hr_dev, mailbox); return ret; @@ -4014,9 +4222,8 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, memcpy(mailbox->buf, context, qpc_size); memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, - HNS_ROCE_CMD_MODIFY_QPC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); @@ -4081,7 +4288,6 @@ static inline int get_pdn(struct ib_pd *ib_pd) static void modify_qp_reset_to_init(struct ib_qp *ibqp, const struct ib_qp_attr *attr, - int attr_mask, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { @@ -4145,7 +4351,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, } static void modify_qp_init_to_init(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, int attr_mask, + const struct ib_qp_attr *attr, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { @@ -4394,7 +4600,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, hr_reg_clear(qpc_mask, QPC_DQPN); } - memcpy(&(context->dmac), dmac, sizeof(u32)); + memcpy(&context->dmac, dmac, sizeof(u32)); hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4]))); qpc_mask->dmac = 0; hr_reg_clear(qpc_mask, QPC_DMAC_H); @@ -4488,14 +4694,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, return 0; } -static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn) -{ - if (!fl) - fl = rdma_calc_flow_label(lqpn, rqpn); - - return 
rdma_flow_label_to_udp_sport(fl); -} - static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, u32 *dip_idx) { @@ -4672,6 +4870,18 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, u8 hr_port; int ret; + /* + * If free_mr_en of qp is set, it means that this qp comes from + * free mr. This qp will perform the loopback operation. + * In the loopback scenario, only sl needs to be set. + */ + if (hr_qp->free_mr_en) { + hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr)); + hr_reg_clear(qpc_mask, QPC_SL); + hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); + return 0; + } + ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1; hr_port = ib_port - 1; is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && @@ -4683,9 +4893,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, if (ret) return ret; - if (gid_attr) - is_udp = (gid_attr->gid_type == - IB_GID_TYPE_ROCE_UDP_ENCAP); + is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); } /* Only HIP08 needs to set the vlan_en bits in QPC */ @@ -4712,8 +4920,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, } hr_reg_write(context, QPC_UDPSPN, - is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num, - attr->dest_qp_num) : 0); + is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num, + attr->dest_qp_num) : + 0); hr_reg_clear(qpc_mask, QPC_UDPSPN); @@ -4739,7 +4948,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { ibdev_err(ibdev, - "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", + "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", hr_qp->sl, MAX_SERVICE_LEVEL); return -EINVAL; } @@ -4768,7 +4977,8 @@ static bool check_qp_state(enum ib_qp_state cur_state, [IB_QPS_ERR] = true }, [IB_QPS_SQD] = {}, [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } + [IB_QPS_ERR] = { [IB_QPS_RESET] = true, + [IB_QPS_ERR] = true } }; return sm[cur_state][new_state]; @@ -4792,11 +5002,9 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { memset(qpc_mask, 0, hr_dev->caps.qpc_sz); - modify_qp_reset_to_init(ibqp, attr, attr_mask, context, - qpc_mask); + modify_qp_reset_to_init(ibqp, attr, context, qpc_mask); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - modify_qp_init_to_init(ibqp, attr, attr_mask, context, - qpc_mask); + modify_qp_init_to_init(ibqp, attr, context, qpc_mask); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context, qpc_mask); @@ -4817,14 +5025,14 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) { ibdev_warn(&hr_dev->ib_dev, - "Local ACK timeout shall be 0 to 20.\n"); + "local ACK timeout shall be 0 to 20.\n"); return false; } *timeout += QP_ACK_TIMEOUT_OFFSET; } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { if (*timeout > QP_ACK_TIMEOUT_MAX) { ibdev_warn(&hr_dev->ib_dev, - "Local ACK timeout shall be 0 to 31.\n"); + "local ACK timeout shall be 0 to 31.\n"); return false; } } @@ -5084,9 +5292,8 @@ static int to_ib_qp_st(enum hns_roce_v2_qp_state state) return (state < ARRAY_SIZE(map)) ? 
map[state] : -1; } -static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct hns_roce_v2_qp_context *hr_context) +static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn, + void *buffer) { struct hns_roce_cmd_mailbox *mailbox; int ret; @@ -5095,13 +5302,12 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, - HNS_ROCE_CMD_QUERY_QPC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC, + qpn); if (ret) goto out; - memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz); + memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz); out: hns_roce_free_cmd_mailbox(hr_dev, mailbox); @@ -5131,7 +5337,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, goto done; } - ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context); + ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context); if (ret) { ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret); ret = -EINVAL; @@ -5329,7 +5535,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, msleep(20); } - ibdev_err(ibdev, "Query SCC clr done flag overtime.\n"); + ibdev_err(ibdev, "query SCC clr done flag overtime.\n"); ret = -ETIMEDOUT; out: @@ -5463,9 +5669,8 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit); hr_reg_clear(srqc_mask, SRQC_LIMIT_WL); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0, - HNS_ROCE_CMD_MODIFY_SRQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) { ibdev_err(&hr_dev->ib_dev, @@ -5491,9 +5696,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) return PTR_ERR(mailbox); srq_context = mailbox->buf; - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0, - HNS_ROCE_CMD_QUERY_SRQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, + HNS_ROCE_CMD_QUERY_SRQC, srq->srqn); if (ret) { ibdev_err(&hr_dev->ib_dev, "failed to process cmd of querying SRQ, ret = %d.\n", @@ -5543,9 +5747,8 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period); hr_reg_clear(cqc_mask, CQC_CQ_PERIOD); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1, - HNS_ROCE_CMD_MODIFY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) ibdev_err(&hr_dev->ib_dev, @@ -5555,6 +5758,64 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) return ret; } +static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn, + void *buffer) +{ + struct hns_roce_v2_cq_context *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, + HNS_ROCE_CMD_QUERY_CQC, cqn); + if (ret) { + ibdev_err(&hr_dev->ib_dev, + "failed to process cmd when querying CQ, ret = %d.\n", + ret); + goto err_mailbox; + } + + memcpy(buffer, context, sizeof(*context)); + +err_mailbox: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); 
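+ /* + * The mailbox is released on both paths; on success the CQC snapshot + * has already been copied into the caller's buffer. + */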
+ + return ret; +} + +static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key, + void *buffer) +{ + struct hns_roce_v2_mpt_entry *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT, + key_to_hw_index(key)); + if (ret) { + ibdev_err(&hr_dev->ib_dev, + "failed to process cmd when querying MPT, ret = %d.\n", + ret); + goto err_mailbox; + } + + memcpy(buffer, context, sizeof(*context)); + +err_mailbox: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + static void hns_roce_irq_work_handle(struct work_struct *work) { struct hns_roce_work *irq_work = @@ -5563,26 +5824,26 @@ static void hns_roce_irq_work_handle(struct work_struct *work) switch (irq_work->event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: - ibdev_info(ibdev, "Path migrated succeeded.\n"); + ibdev_info(ibdev, "path migrated succeeded.\n"); break; case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - ibdev_warn(ibdev, "Path migration failed.\n"); + ibdev_warn(ibdev, "path migration failed.\n"); break; case HNS_ROCE_EVENT_TYPE_COMM_EST: break; case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - ibdev_warn(ibdev, "Send queue drained.\n"); + ibdev_warn(ibdev, "send queue drained.\n"); break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n", + ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n", irq_work->queue_num, irq_work->sub_type); break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n", + ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n", irq_work->queue_num); break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n", + ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n", irq_work->queue_num, irq_work->sub_type); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: @@ -5604,7 +5865,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work) ibdev_warn(ibdev, "DB overflow.\n"); break; case HNS_ROCE_EVENT_TYPE_FLR: - ibdev_warn(ibdev, "Function level reset.\n"); + ibdev_warn(ibdev, "function level reset.\n"); break; case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION: ibdev_err(ibdev, "xrc domain violation error.\n"); @@ -5628,12 +5889,12 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, if (!irq_work) return; - INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle); + INIT_WORK(&irq_work->work, hns_roce_irq_work_handle); irq_work->hr_dev = hr_dev; irq_work->event_type = eq->event_type; irq_work->sub_type = eq->sub_type; irq_work->queue_num = queue_num; - queue_work(hr_dev->irq_workq, &(irq_work->work)); + queue_work(hr_dev->irq_workq, &irq_work->work); } static void update_eq_db(struct hns_roce_eq *eq) @@ -5668,16 +5929,16 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); - return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^ + return (hr_reg_read(aeqe, AEQE_OWNER) ^ !!(eq->cons_index & eq->entries)) ? 
aeqe : NULL; } -static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) +static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, + struct hns_roce_eq *eq) { struct device *dev = hr_dev->dev; struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq); - int aeqe_found = 0; + irqreturn_t aeqe_found = IRQ_NONE; int event_type; u32 queue_num; int sub_type; @@ -5688,15 +5949,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, */ dma_rmb(); - event_type = roce_get_field(aeqe->asyn, - HNS_ROCE_V2_AEQE_EVENT_TYPE_M, - HNS_ROCE_V2_AEQE_EVENT_TYPE_S); - sub_type = roce_get_field(aeqe->asyn, - HNS_ROCE_V2_AEQE_SUB_TYPE_M, - HNS_ROCE_V2_AEQE_SUB_TYPE_S); - queue_num = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M, - HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S); + event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE); + sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE); + queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM); switch (event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: @@ -5729,7 +5984,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, case HNS_ROCE_EVENT_TYPE_FLR: break; default: - dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n", + dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n", event_type, eq->eqn, eq->cons_index); break; } @@ -5737,7 +5992,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, eq->event_type = event_type; eq->sub_type = sub_type; ++eq->cons_index; - aeqe_found = 1; + aeqe_found = IRQ_HANDLED; hns_roce_v2_init_irq_work(hr_dev, eq, queue_num); @@ -5745,7 +6000,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, } update_eq_db(eq); - return aeqe_found; + + return IRQ_RETVAL(aeqe_found); } static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) @@ -5756,15 +6012,15 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); - return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^ - (!!(eq->cons_index & eq->entries)) ? ceqe : NULL; + return (hr_reg_read(ceqe, CEQE_OWNER) ^ + !!(eq->cons_index & eq->entries)) ? 
ceqe : NULL; } -static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) +static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, + struct hns_roce_eq *eq) { struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq); - int ceqe_found = 0; + irqreturn_t ceqe_found = IRQ_NONE; u32 cqn; while (ceqe) { @@ -5773,59 +6029,53 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, */ dma_rmb(); - cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M, - HNS_ROCE_V2_CEQE_COMP_CQN_S); + cqn = hr_reg_read(ceqe, CEQE_CQN); hns_roce_cq_completion(hr_dev, cqn); ++eq->cons_index; - ceqe_found = 1; + ceqe_found = IRQ_HANDLED; ceqe = next_ceqe_sw_v2(eq); } update_eq_db(eq); - return ceqe_found; + return IRQ_RETVAL(ceqe_found); } static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr) { struct hns_roce_eq *eq = eq_ptr; struct hns_roce_dev *hr_dev = eq->hr_dev; - int int_work; + irqreturn_t int_work; if (eq->type_flag == HNS_ROCE_CEQ) /* Completion event interrupt */ int_work = hns_roce_v2_ceq_int(hr_dev, eq); else - /* Asychronous event interrupt */ + /* Asynchronous event interrupt */ int_work = hns_roce_v2_aeq_int(hr_dev, eq); return IRQ_RETVAL(int_work); } -static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) +static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, + u32 int_st) { - struct hns_roce_dev *hr_dev = dev_id; - struct device *dev = hr_dev->dev; - int int_work = 0; - u32 int_st; + struct pci_dev *pdev = hr_dev->pci_dev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + const struct hnae3_ae_ops *ops = ae_dev->ops; + irqreturn_t int_work = IRQ_NONE; u32 int_en; - /* Abnormal interrupt */ - int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG); int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG); if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) { - struct pci_dev *pdev = hr_dev->pci_dev; - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); - const struct hnae3_ae_ops *ops = ae_dev->ops; + dev_err(hr_dev->dev, "AEQ overflow!\n"); - dev_err(dev, "AEQ overflow!\n"); - - int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S; - roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, + 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S); /* Set reset level for reset_event() */ if (ops->set_default_reset_request) @@ -5837,19 +6087,165 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); - int_work = 1; - } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) { - dev_err(dev, "RAS interrupt!\n"); + int_work = IRQ_HANDLED; + } else { + dev_err(hr_dev->dev, "there is no basic abn irq found.\n"); + } - int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S; - roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + return IRQ_RETVAL(int_work); +} - int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; - roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); +static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev, + struct fmea_ram_ecc *ecc_info) +{ + struct hns_roce_cmq_desc desc; + struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data; + int ret; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + return ret; + + ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR); + ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE); + ecc_info->index = 
hr_reg_read(req, QUERY_RAM_ECC_TAG); + + return 0; +} + +static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx) +{ + struct hns_roce_cmq_desc desc; + struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data; + u32 addr_upper; + u32 addr_low; + int ret; - int_work = 1; + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true); + hr_reg_write(req, CFG_GMV_BT_IDX, idx); + + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) { + dev_err(hr_dev->dev, + "failed to execute cmd to read gmv, ret = %d.\n", ret); + return ret; + } + + addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L); + addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H); + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false); + hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low); + hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper); + hr_reg_write(req, CFG_GMV_BT_IDX, idx); + + return hns_roce_cmq_send(hr_dev, &desc, 1); +} + +static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data) +{ + if (res_type == ECC_RESOURCE_QPC_TIMER || + res_type == ECC_RESOURCE_CQC_TIMER || + res_type == ECC_RESOURCE_SCCC) + return le64_to_cpu(*data); + + return le64_to_cpu(*data) << PAGE_SHIFT; +} + +static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type, + u32 index) +{ + u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op; + u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op; + struct hns_roce_cmd_mailbox *mailbox; + u64 addr; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index); + if (ret) { + dev_err(hr_dev->dev, + "failed to execute cmd to read fmea ram, ret = %d.\n", + ret); + goto out; + } + + addr = fmea_get_ram_res_addr(res_type, mailbox->buf); + + ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index); + if (ret) + dev_err(hr_dev->dev, + "failed to execute cmd to write fmea ram, ret = %d.\n", + ret); + +out: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + +static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev, + struct fmea_ram_ecc *ecc_info) +{ + u32 res_type = ecc_info->res_type; + u32 index = ecc_info->index; + int ret; + + BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT); + + if (res_type >= ECC_RESOURCE_COUNT) { + dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n", + res_type); + return; + } + + if (res_type == ECC_RESOURCE_GMV) + ret = fmea_recover_gmv(hr_dev, index); + else + ret = fmea_recover_others(hr_dev, res_type, index); + if (ret) + dev_err(hr_dev->dev, + "failed to recover %s, index = %u, ret = %d.\n", + fmea_ram_res[res_type].name, index, ret); +} + +static void fmea_ram_ecc_work(struct work_struct *ecc_work) +{ + struct hns_roce_dev *hr_dev = + container_of(ecc_work, struct hns_roce_dev, ecc_work); + struct fmea_ram_ecc ecc_info = {}; + + if (fmea_ram_ecc_query(hr_dev, &ecc_info)) { + dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n"); + return; + } + + if (!ecc_info.is_ecc_err) { + dev_err(hr_dev->dev, "there is no fmea ram ecc err found.\n"); + return; + } + + fmea_ram_ecc_recover(hr_dev, &ecc_info); +} + +static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) +{ + struct hns_roce_dev *hr_dev = dev_id; + irqreturn_t int_work = IRQ_NONE; + u32 int_st; + + int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG); + + if (int_st) { + int_work = abnormal_interrupt_basic(hr_dev, int_st); + } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { + 
queue_work(hr_dev->irq_workq, &hr_dev->ecc_work); + int_work = IRQ_HANDLED; } else { - dev_err(dev, "There is no abnormal irq found!\n"); + dev_err(hr_dev->dev, "there is no abnormal irq found.\n"); } return IRQ_RETVAL(int_work); @@ -5868,21 +6264,20 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag); } -static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) +static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) { struct device *dev = hr_dev->dev; int ret; + u8 cmd; if (eqn < hr_dev->caps.num_comp_vectors) - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, - 0, HNS_ROCE_CMD_DESTROY_CEQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + cmd = HNS_ROCE_CMD_DESTROY_CEQC; else - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, - 0, HNS_ROCE_CMD_DESTROY_AEQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + cmd = HNS_ROCE_CMD_DESTROY_AEQC; + + ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M); if (ret) - dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); + dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); } static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) @@ -5980,22 +6375,21 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL, 0); if (err) - dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err); + dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err); return err; } static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq, - unsigned int eq_cmd) + struct hns_roce_eq *eq, u8 eq_cmd) { struct hns_roce_cmd_mailbox *mailbox; int ret; /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR_OR_NULL(mailbox)) - return -ENOMEM; + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); ret = alloc_eq_buf(hr_dev, eq); if (ret) @@ -6005,8 +6399,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, if (ret) goto err_cmd_mbox; - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, - eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn); if (ret) { dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n"); goto err_cmd_mbox; @@ -6071,7 +6464,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, 0, hr_dev->irq_names[j - comp_num], &eq_table->eq[j - other_num]); if (ret) { - dev_err(hr_dev->dev, "Request irq error!\n"); + dev_err(hr_dev->dev, "request irq error!\n"); goto err_request_failed; } } @@ -6117,14 +6510,14 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; struct device *dev = hr_dev->dev; struct hns_roce_eq *eq; - unsigned int eq_cmd; - int irq_num; - int eq_num; int other_num; int comp_num; int aeq_num; - int i; + int irq_num; + int eq_num; + u8 eq_cmd; int ret; + int i; other_num = hr_dev->caps.num_other_vectors; comp_num = hr_dev->caps.num_comp_vectors; @@ -6169,6 +6562,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) } } + INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work); + hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0); if (!hr_dev->irq_workq) { dev_err(dev, "failed to create irq workqueue.\n"); @@ -6222,10 +6617,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) kfree(eq_table->eq); } -static const struct hns_roce_dfx_hw 
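Note how the reworked abnormal-interrupt path splits work by context: abnormal_interrupt_basic() handles the AEQ-overflow bit with plain register writes, while the HIP09-only ECC event is pushed onto hr_dev->irq_workq, because querying and repairing the FMEA RAM issues firmware commands that may sleep. A sketch of that hard-IRQ-to-workqueue deferral, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {	/* hypothetical device */
	struct workqueue_struct *wq;
	struct work_struct ecc_work;	/* assumes INIT_WORK() ran at init */
};

static void demo_ecc_work_fn(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, ecc_work);

	/* Process context: blocking query/recover commands are safe here. */
	(void)dev;
}

static irqreturn_t demo_abn_irq(int irq, void *data)
{
	struct demo_dev *dev = data;

	/* Hard-IRQ context: only schedule the slow recovery path. */
	queue_work(dev->wq, &dev->ecc_work);
	return IRQ_HANDLED;
}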
hns_roce_dfx_hw_v2 = { - .query_cqc_info = hns_roce_v2_query_cqc_info, -}; - static const struct ib_device_ops hns_roce_v2_dev_ops = { .destroy_qp = hns_roce_v2_destroy_qp, .modify_cq = hns_roce_v2_modify_cq, @@ -6261,10 +6652,14 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .set_hem = hns_roce_v2_set_hem, .clear_hem = hns_roce_v2_clear_hem, .modify_qp = hns_roce_v2_modify_qp, + .dereg_mr = hns_roce_v2_dereg_mr, .qp_flow_control_init = hns_roce_v2_qp_flow_control_init, .init_eq = hns_roce_v2_init_eq_table, .cleanup_eq = hns_roce_v2_cleanup_eq_table, .write_srqc = hns_roce_v2_write_srqc, + .query_cqc = hns_roce_v2_query_cqc, + .query_qpc = hns_roce_v2_query_qpc, + .query_mpt = hns_roce_v2_query_mpt, .hns_roce_dev_ops = &hns_roce_v2_dev_ops, .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops, }; @@ -6296,7 +6691,6 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, hr_dev->is_vf = id->driver_data; hr_dev->dev = &handle->pdev->dev; hr_dev->hw = &hns_roce_hw_v2; - hr_dev->dfx = &hns_roce_dfx_hw_v2; hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; hr_dev->odb_offset = hr_dev->sdb_offset; @@ -6342,14 +6736,25 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) ret = hns_roce_init(hr_dev); if (ret) { dev_err(hr_dev->dev, "RoCE Engine init failed!\n"); - goto error_failed_get_cfg; + goto error_failed_cfg; + } + + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { + ret = free_mr_init(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "failed to init free mr!\n"); + goto error_failed_roce_init; + } } handle->priv = hr_dev; return 0; -error_failed_get_cfg: +error_failed_roce_init: + hns_roce_exit(hr_dev); + +error_failed_cfg: kfree(hr_dev->priv); error_failed_kzalloc: @@ -6371,6 +6776,9 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; hns_roce_handle_device_err(hr_dev); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + free_mr_exit(hr_dev); + hns_roce_exit(hr_dev); kfree(hr_dev->priv); ib_dealloc_device(&hr_dev->ib_dev); @@ -6394,7 +6802,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) if (!id) return 0; - if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09) + if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08) return 0; ret = __hns_roce_hw_v2_init_instance(handle); @@ -6478,7 +6886,7 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret); } else { handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; - dev_info(dev, "Reset done, RoCE client reinit finished.\n"); + dev_info(dev, "reset done, RoCE client reinit finished.\n"); } return ret; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 35c61da7ba15..c7bf2d52c1cd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -35,41 +35,25 @@ #include <linux/bitops.h> -#define HNS_ROCE_VF_QPC_BT_NUM 256 -#define HNS_ROCE_VF_SCCC_BT_NUM 64 -#define HNS_ROCE_VF_SRQC_BT_NUM 64 -#define HNS_ROCE_VF_CQC_BT_NUM 64 -#define HNS_ROCE_VF_MPT_BT_NUM 64 -#define HNS_ROCE_VF_SMAC_NUM 32 -#define HNS_ROCE_VF_SL_NUM 8 -#define HNS_ROCE_VF_GMV_BT_NUM 256 - #define HNS_ROCE_V2_MAX_QP_NUM 0x1000 -#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_SRQ 0x100000 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define 
HNS_ROCE_V2_MAX_CQ_NUM 0x100000 -#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 +#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100 +#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 -#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 -#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20 #define HNS_ROCE_V3_MAX_SQ_INLINE 0x400 #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 #define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_PHY_UAR_NUM 1 -#define HNS_ROCE_V2_MAX_IRQ_NUM 65 -#define HNS_ROCE_V2_COMP_VEC_NUM 63 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 -#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 -#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 @@ -79,9 +63,7 @@ #define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128 #define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64 #define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16 -#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 -#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 @@ -98,12 +80,11 @@ #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE -#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 +#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 #define HNS_ROCE_INVALID_LKEY 0x0 #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 -#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 @@ -117,12 +98,14 @@ #define HNS_ROCE_CQE_HOP_NUM 1 #define HNS_ROCE_SRQWQE_HOP_NUM 1 #define HNS_ROCE_PBL_HOP_NUM 2 -#define HNS_ROCE_EQE_HOP_NUM 2 #define HNS_ROCE_IDX_HOP_NUM 1 #define HNS_ROCE_SQWQE_HOP_NUM 2 #define HNS_ROCE_EXT_SGE_HOP_NUM 1 #define HNS_ROCE_RQWQE_HOP_NUM 2 +#define HNS_ROCE_V2_EQE_HOP_NUM 2 +#define HNS_ROCE_V3_EQE_HOP_NUM 1 + #define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2 #define HNS_ROCE_V2_GID_INDEX_NUM 16 @@ -153,6 +136,18 @@ enum { #define CMD_CSQ_DESC_NUM 1024 #define CMD_CRQ_DESC_NUM 1024 +/* Free mr used parameters */ +#define HNS_ROCE_FREE_MR_USED_CQE_NUM 128 +#define HNS_ROCE_FREE_MR_USED_QP_NUM 0x8 +#define HNS_ROCE_FREE_MR_USED_PSN 0x0808 +#define HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT 0x7 +#define HNS_ROCE_FREE_MR_USED_QP_TIMEOUT 0x12 +#define HNS_ROCE_FREE_MR_USED_SQWQE_NUM 128 +#define HNS_ROCE_FREE_MR_USED_SQSGE_NUM 0x2 +#define HNS_ROCE_FREE_MR_USED_RQWQE_NUM 128 +#define HNS_ROCE_FREE_MR_USED_RQSGE_NUM 0x2 +#define HNS_ROCE_V2_FREE_MR_TIMEOUT 4500 + enum { NO_ARMED = 0x0, REG_NXT_CEQE = 0x2, @@ -184,7 +179,6 @@ enum { HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8, HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9, HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa, - HNS_ROCE_V2_WQE_OP_LOCAL_INV = 0xb, HNS_ROCE_V2_WQE_OP_BIND_MW = 0xc, HNS_ROCE_V2_WQE_OP_MASK = 0x1f, }; @@ -252,6 +246,7 @@ enum hns_roce_opcode_type { HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f, HNS_ROCE_OPC_CFG_GMV_BT = 0x8510, HNS_ROCE_OPC_EXT_CFG = 0x8512, + HNS_ROCE_QUERY_RAM_ECC = 0x8513, HNS_SWITCH_PARAMETER_CFG = 0x1033, }; @@ -305,33 +300,6 @@ struct hns_roce_v2_cq_context { #define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0 
#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0 -#define V2_CQC_BYTE_4_ARM_ST_S 6 -#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6) - -#define V2_CQC_BYTE_4_CEQN_S 15 -#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15) - -#define V2_CQC_BYTE_8_CQN_S 0 -#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0) - -#define V2_CQC_BYTE_16_CQE_HOP_NUM_S 30 -#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30) - -#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0 -#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M GENMASK(23, 0) - -#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0 -#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0) - -#define V2_CQC_BYTE_52_CQE_CNT_S 0 -#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0) - -#define V2_CQC_BYTE_56_CQ_MAX_CNT_S 0 -#define V2_CQC_BYTE_56_CQ_MAX_CNT_M GENMASK(15, 0) - -#define V2_CQC_BYTE_56_CQ_PERIOD_S 16 -#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16) - #define CQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cq_context, h, l) #define CQC_CQ_ST CQC_FIELD_LOC(1, 0) @@ -434,6 +402,7 @@ enum hns_roce_v2_qp_state { struct hns_roce_v2_qp_context_ex { __le32 data[64]; }; + struct hns_roce_v2_qp_context { __le32 byte_4_sqpn_tst; __le32 wqe_sge_ba; @@ -786,16 +755,20 @@ struct hns_roce_v2_mpt_entry { #define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71) #define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72) #define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96) -#define MPT_LEN MPT_FIELD_LOC(191, 128) +#define MPT_LEN_L MPT_FIELD_LOC(159, 128) +#define MPT_LEN_H MPT_FIELD_LOC(191, 160) #define MPT_LKEY MPT_FIELD_LOC(223, 192) #define MPT_VA MPT_FIELD_LOC(287, 224) #define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288) -#define MPT_PBL_BA MPT_FIELD_LOC(380, 320) +#define MPT_PBL_BA_L MPT_FIELD_LOC(351, 320) +#define MPT_PBL_BA_H MPT_FIELD_LOC(380, 352) #define MPT_BLK_MODE MPT_FIELD_LOC(381, 381) #define MPT_RSV0 MPT_FIELD_LOC(383, 382) -#define MPT_PA0 MPT_FIELD_LOC(441, 384) +#define MPT_PA0_L MPT_FIELD_LOC(415, 384) +#define MPT_PA0_H MPT_FIELD_LOC(441, 416) #define MPT_BOUND_VA MPT_FIELD_LOC(447, 442) -#define MPT_PA1 MPT_FIELD_LOC(505, 448) +#define MPT_PA1_L MPT_FIELD_LOC(479, 448) +#define MPT_PA1_H MPT_FIELD_LOC(505, 480) #define MPT_PERSIST_EN MPT_FIELD_LOC(506, 506) #define MPT_RSV2 MPT_FIELD_LOC(507, 507) #define MPT_PBL_BUF_PG_SZ MPT_FIELD_LOC(511, 508) @@ -901,48 +874,24 @@ struct hns_roce_v2_ud_send_wqe { u8 dgid[GID_LEN_V2]; }; -#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0 -#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0) - -#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7 - -#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8 - -#define V2_UD_SEND_WQE_BYTE_4_SE_S 11 - -#define V2_UD_SEND_WQE_BYTE_16_PD_S 0 -#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24 -#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24) - -#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 -#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16 -#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16) - -#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0 -#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0 -#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0) - -#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16 -#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16) - -#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24 -#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24) - -#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0 -#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0) - -#define V2_UD_SEND_WQE_BYTE_40_SL_S 20 -#define 
V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20) - -#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30 - -#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31 +#define UD_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_ud_send_wqe, h, l) + +#define UD_SEND_WQE_OPCODE UD_SEND_WQE_FIELD_LOC(4, 0) +#define UD_SEND_WQE_OWNER UD_SEND_WQE_FIELD_LOC(7, 7) +#define UD_SEND_WQE_CQE UD_SEND_WQE_FIELD_LOC(8, 8) +#define UD_SEND_WQE_SE UD_SEND_WQE_FIELD_LOC(11, 11) +#define UD_SEND_WQE_PD UD_SEND_WQE_FIELD_LOC(119, 96) +#define UD_SEND_WQE_SGE_NUM UD_SEND_WQE_FIELD_LOC(127, 120) +#define UD_SEND_WQE_MSG_START_SGE_IDX UD_SEND_WQE_FIELD_LOC(151, 128) +#define UD_SEND_WQE_UDPSPN UD_SEND_WQE_FIELD_LOC(191, 176) +#define UD_SEND_WQE_DQPN UD_SEND_WQE_FIELD_LOC(247, 224) +#define UD_SEND_WQE_VLAN UD_SEND_WQE_FIELD_LOC(271, 256) +#define UD_SEND_WQE_HOPLIMIT UD_SEND_WQE_FIELD_LOC(279, 272) +#define UD_SEND_WQE_TCLASS UD_SEND_WQE_FIELD_LOC(287, 280) +#define UD_SEND_WQE_FLOW_LABEL UD_SEND_WQE_FIELD_LOC(307, 288) +#define UD_SEND_WQE_SL UD_SEND_WQE_FIELD_LOC(311, 308) +#define UD_SEND_WQE_VLAN_EN UD_SEND_WQE_FIELD_LOC(318, 318) +#define UD_SEND_WQE_LBI UD_SEND_WQE_FIELD_LOC(319, 319) struct hns_roce_v2_rc_send_wqe { __le32 byte_4; @@ -957,42 +906,22 @@ struct hns_roce_v2_rc_send_wqe { __le64 va; }; -#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0 -#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0) - -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S 5 -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M GENMASK(6, 5) - -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S 13 -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M GENMASK(14, 13) - -#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S 15 -#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M GENMASK(30, 15) - -#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7 - -#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8 - -#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9 - -#define V2_RC_SEND_WQE_BYTE_4_SO_S 10 - -#define V2_RC_SEND_WQE_BYTE_4_SE_S 11 - -#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12 - -#define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31 - -#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0 -#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0) - -#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24 -#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24) - -#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 -#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) - -#define V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S 31 +#define RC_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_rc_send_wqe, h, l) + +#define RC_SEND_WQE_OPCODE RC_SEND_WQE_FIELD_LOC(4, 0) +#define RC_SEND_WQE_DB_SL_L RC_SEND_WQE_FIELD_LOC(6, 5) +#define RC_SEND_WQE_DB_SL_H RC_SEND_WQE_FIELD_LOC(14, 13) +#define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7) +#define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8) +#define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9) +#define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11) +#define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12) +#define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15) +#define RC_SEND_WQE_FLAG RC_SEND_WQE_FIELD_LOC(31, 31) +#define RC_SEND_WQE_XRC_SRQN RC_SEND_WQE_FIELD_LOC(119, 96) +#define RC_SEND_WQE_SGE_NUM RC_SEND_WQE_FIELD_LOC(127, 120) +#define RC_SEND_WQE_MSG_START_SGE_IDX RC_SEND_WQE_FIELD_LOC(151, 128) +#define RC_SEND_WQE_INL_TYPE RC_SEND_WQE_FIELD_LOC(159, 159) struct hns_roce_wqe_frmr_seg { __le32 pbl_size; @@ -1035,7 +964,10 @@ struct hns_roce_func_clear { __le32 rsv[4]; }; -#define FUNC_CLEAR_RST_FUN_DONE_S 0 +#define FUNC_CLEAR_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_func_clear, h, l) + +#define FUNC_CLEAR_RST_FUN_DONE 
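These header hunks replace per-dword *_S/*_M shift/mask pairs with FIELD_LOC(high, low) definitions naming absolute bit positions within the whole structure (which is also why 64-bit fields such as MPT_LEN are split into _L/_H halves at dword boundaries). The hr_reg_read()/hr_reg_write() helpers then locate the right word from the bit range. A simplified illustration of that arithmetic (not the driver's actual macros):

#include <linux/bits.h>
#include <linux/types.h>

/* Read bits [h:l] of a context viewed as an array of 32-bit words
 * (already converted to CPU byte order). Assumes the field does not
 * straddle a 32-bit boundary, which the definitions above respect.
 */
static u32 demo_reg_read(const u32 *words, unsigned int h, unsigned int l)
{
	return (words[l / 32] >> (l % 32)) & GENMASK(h - l, 0);
}

/* e.g. UD_SEND_WQE_PD above spans bits 119..96: word 3, bits 23..0. */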
FUNC_CLEAR_FIELD_LOC(32, 32) + /* Each physical function manages up to 248 virtual functions, it takes up to * 100ms for each function to execute clear. If an abnormal reset occurs, it is * executed twice at most, so it takes up to 249 * 2 * 100ms. @@ -1114,12 +1046,12 @@ struct hns_roce_vf_switch { __le32 resv3; }; -#define VF_SWITCH_DATA_FUN_ID_VF_ID_S 3 -#define VF_SWITCH_DATA_FUN_ID_VF_ID_M GENMASK(10, 3) +#define VF_SWITCH_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_vf_switch, h, l) -#define VF_SWITCH_DATA_CFG_ALW_LPBK_S 1 -#define VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S 2 -#define VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S 3 +#define VF_SWITCH_VF_ID VF_SWITCH_FIELD_LOC(42, 35) +#define VF_SWITCH_ALW_LPBK VF_SWITCH_FIELD_LOC(65, 65) +#define VF_SWITCH_ALW_LCL_LPBK VF_SWITCH_FIELD_LOC(66, 66) +#define VF_SWITCH_ALW_DST_OVRD VF_SWITCH_FIELD_LOC(67, 67) struct hns_roce_post_mbox { __le32 in_param_l; @@ -1173,6 +1105,11 @@ enum { #define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32) #define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64) +/* Fields of HNS_ROCE_QUERY_RAM_ECC */ +#define QUERY_RAM_ECC_1BIT_ERR CMQ_REQ_FIELD_LOC(31, 0) +#define QUERY_RAM_ECC_RES_TYPE CMQ_REQ_FIELD_LOC(63, 32) +#define QUERY_RAM_ECC_TAG CMQ_REQ_FIELD_LOC(95, 64) + struct hns_roce_cfg_sgid_tb { __le32 table_idx_rsv; __le32 vf_sgid_l; @@ -1182,11 +1119,10 @@ struct hns_roce_cfg_sgid_tb { __le32 vf_sgid_type_rsv; }; -#define CFG_SGID_TB_TABLE_IDX_S 0 -#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0) +#define SGID_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_sgid_tb, h, l) -#define CFG_SGID_TB_VF_SGID_TYPE_S 0 -#define CFG_SGID_TB_VF_SGID_TYPE_M GENMASK(1, 0) +#define CFG_SGID_TB_TABLE_IDX SGID_TB_FIELD_LOC(7, 0) +#define CFG_SGID_TB_VF_SGID_TYPE SGID_TB_FIELD_LOC(161, 160) struct hns_roce_cfg_smac_tb { __le32 tb_idx_rsv; @@ -1194,11 +1130,11 @@ struct hns_roce_cfg_smac_tb { __le32 vf_smac_h_rsv; __le32 rsv[3]; }; -#define CFG_SMAC_TB_IDX_S 0 -#define CFG_SMAC_TB_IDX_M GENMASK(7, 0) -#define CFG_SMAC_TB_VF_SMAC_H_S 0 -#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0) +#define SMAC_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_smac_tb, h, l) + +#define CFG_SMAC_TB_IDX SMAC_TB_FIELD_LOC(7, 0) +#define CFG_SMAC_TB_VF_SMAC_H SMAC_TB_FIELD_LOC(79, 64) struct hns_roce_cfg_gmv_tb_a { __le32 vf_sgid_l; @@ -1209,16 +1145,11 @@ struct hns_roce_cfg_gmv_tb_a { __le32 resv; }; -#define CFG_GMV_TB_SGID_IDX_S 0 -#define CFG_GMV_TB_SGID_IDX_M GENMASK(7, 0) - -#define CFG_GMV_TB_VF_SGID_TYPE_S 0 -#define CFG_GMV_TB_VF_SGID_TYPE_M GENMASK(1, 0) - -#define CFG_GMV_TB_VF_VLAN_EN_S 2 +#define GMV_TB_A_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_a, h, l) -#define CFG_GMV_TB_VF_VLAN_ID_S 16 -#define CFG_GMV_TB_VF_VLAN_ID_M GENMASK(27, 16) +#define GMV_TB_A_VF_SGID_TYPE GMV_TB_A_FIELD_LOC(129, 128) +#define GMV_TB_A_VF_VLAN_EN GMV_TB_A_FIELD_LOC(130, 130) +#define GMV_TB_A_VF_VLAN_ID GMV_TB_A_FIELD_LOC(155, 144) struct hns_roce_cfg_gmv_tb_b { __le32 vf_smac_l; @@ -1227,8 +1158,10 @@ struct hns_roce_cfg_gmv_tb_b { __le32 resv[3]; }; -#define CFG_GMV_TB_SMAC_H_S 0 -#define CFG_GMV_TB_SMAC_H_M GENMASK(15, 0) +#define GMV_TB_B_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_b, h, l) + +#define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32) +#define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64) #define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5 struct hns_roce_query_pf_caps_a { @@ -1237,7 +1170,7 @@ struct hns_roce_query_pf_caps_a { __le16 max_sq_sg; __le16 max_sq_inline; __le16 max_rq_sg; - __le32 max_extend_sg; + __le32 rsv0; __le16 num_qpc_timer; __le16 
num_cqc_timer; __le16 max_srq_sges; @@ -1245,7 +1178,7 @@ struct hns_roce_query_pf_caps_a { u8 num_other_vectors; u8 max_sq_desc_sz; u8 max_rq_desc_sz; - u8 max_srq_desc_sz; + u8 rsv1; u8 cqe_sz; }; @@ -1280,29 +1213,17 @@ struct hns_roce_query_pf_caps_c { __le16 rq_depth; }; -#define V2_QUERY_PF_CAPS_C_NUM_PDS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_PDS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_S 20 -#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_M GENMASK(31, 20) +#define PF_CAPS_C_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_c, h, l) -#define V2_QUERY_PF_CAPS_C_NUM_CQS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_CQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_MAX_GID_S 20 -#define V2_QUERY_PF_CAPS_C_MAX_GID_M GENMASK(28, 20) - -#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_M GENMASK(22, 0) - -#define V2_QUERY_PF_CAPS_C_NUM_MRWS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_MRWS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_NUM_QPS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_QPS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_MAX_ORD_S 20 -#define V2_QUERY_PF_CAPS_C_MAX_ORD_M GENMASK(27, 20) +#define PF_CAPS_C_NUM_PDS PF_CAPS_C_FIELD_LOC(19, 0) +#define PF_CAPS_C_CAP_FLAGS PF_CAPS_C_FIELD_LOC(31, 20) +#define PF_CAPS_C_NUM_CQS PF_CAPS_C_FIELD_LOC(51, 32) +#define PF_CAPS_C_MAX_GID PF_CAPS_C_FIELD_LOC(60, 52) +#define PF_CAPS_C_CQ_DEPTH PF_CAPS_C_FIELD_LOC(86, 64) +#define PF_CAPS_C_NUM_MRWS PF_CAPS_C_FIELD_LOC(115, 96) +#define PF_CAPS_C_NUM_QPS PF_CAPS_C_FIELD_LOC(147, 128) +#define PF_CAPS_C_MAX_ORD PF_CAPS_C_FIELD_LOC(155, 148) struct hns_roce_query_pf_caps_d { __le32 wq_hop_num_max_srqs; @@ -1313,20 +1234,26 @@ struct hns_roce_query_pf_caps_d { __le32 num_uars_rsv_pds; __le32 rsv_uars_rsv_qps; }; -#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0 -#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0) -#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20 -#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20) - -#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S 22 -#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M GENMASK(23, 22) - -#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24 -#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24) - -#define V2_QUERY_PF_CAPS_D_CONG_TYPE_S 26 -#define V2_QUERY_PF_CAPS_D_CONG_TYPE_M GENMASK(29, 26) +#define PF_CAPS_D_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_d, h, l) + +#define PF_CAPS_D_NUM_SRQS PF_CAPS_D_FIELD_LOC(19, 0) +#define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20) +#define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22) +#define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24) +#define PF_CAPS_D_CONG_TYPE PF_CAPS_D_FIELD_LOC(29, 26) +#define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64) +#define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86) +#define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96) +#define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118) +#define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120) +#define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128) +#define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148) +#define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160) +#define PF_CAPS_D_RSV_UARS PF_CAPS_D_FIELD_LOC(187, 180) + +#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12 struct hns_roce_congestion_algorithm { u8 alg_sel; @@ -1335,33 +1262,6 @@ struct hns_roce_congestion_algorithm { u8 wnd_mode_sel; }; -#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0) - -#define V2_QUERY_PF_CAPS_D_NUM_CEQS_S 22 -#define V2_QUERY_PF_CAPS_D_NUM_CEQS_M GENMASK(31, 
22) - -#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M GENMASK(21, 0) - -#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S 22 -#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M GENMASK(23, 22) - -#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S 24 -#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M GENMASK(25, 24) - -#define V2_QUERY_PF_CAPS_D_RSV_PDS_S 0 -#define V2_QUERY_PF_CAPS_D_RSV_PDS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_NUM_UARS_S 20 -#define V2_QUERY_PF_CAPS_D_NUM_UARS_M GENMASK(27, 20) - -#define V2_QUERY_PF_CAPS_D_RSV_QPS_S 0 -#define V2_QUERY_PF_CAPS_D_RSV_QPS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_RSV_UARS_S 20 -#define V2_QUERY_PF_CAPS_D_RSV_UARS_M GENMASK(27, 20) - struct hns_roce_query_pf_caps_e { __le32 chunk_size_shift_rsv_mrws; __le32 rsv_cqs; @@ -1373,20 +1273,14 @@ struct hns_roce_query_pf_caps_e { __le16 aeq_period; }; -#define V2_QUERY_PF_CAPS_E_RSV_MRWS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_MRWS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S 20 -#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M GENMASK(31, 20) - -#define V2_QUERY_PF_CAPS_E_RSV_CQS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_CQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_E_RSV_SRQS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_SRQS_M GENMASK(19, 0) +#define PF_CAPS_E_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l) -#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0) +#define PF_CAPS_E_RSV_MRWS PF_CAPS_E_FIELD_LOC(19, 0) +#define PF_CAPS_E_CHUNK_SIZE_SHIFT PF_CAPS_E_FIELD_LOC(31, 20) +#define PF_CAPS_E_RSV_CQS PF_CAPS_E_FIELD_LOC(51, 32) +#define PF_CAPS_E_RSV_SRQS PF_CAPS_E_FIELD_LOC(83, 64) +#define PF_CAPS_E_RSV_LKEYS PF_CAPS_E_FIELD_LOC(115, 96) struct hns_roce_cmq_req { __le32 data[6]; @@ -1432,16 +1326,30 @@ struct hns_roce_link_table { #define HNS_ROCE_EXT_LLM_ENTRY(addr, id) (((id) << (64 - 12)) | ((addr) >> 12)) #define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2) +struct hns_roce_v2_free_mr { + struct ib_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM]; + struct ib_cq *rsv_cq; + struct ib_pd *rsv_pd; + struct mutex mutex; +}; + struct hns_roce_v2_priv { struct hnae3_handle *handle; struct hns_roce_v2_cmq cmq; struct hns_roce_link_table ext_llm; + struct hns_roce_v2_free_mr free_mr; }; struct hns_roce_dip { u8 dgid[GID_LEN_V2]; u32 dip_idx; - struct list_head node; /* all dips are on a list */ + struct list_head node; /* all dips are on a list */ +}; + +struct fmea_ram_ecc { + u32 is_ecc_err; + u32 res_type; + u32 index; }; /* only for RNR timeout issue of HIP08 */ @@ -1479,14 +1387,10 @@ struct hns_roce_dip { #define HNS_ROCE_EQ_INIT_CONS_IDX 0 #define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0 -#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31 -#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31 - #define HNS_ROCE_V2_COMP_EQE_NUM 0x1000 #define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000 #define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0 -#define HNS_ROCE_V2_VF_INT_ST_RAS_INT_S 1 #define HNS_ROCE_EQ_DB_CMD_AEQ 0x0 #define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1 @@ -1538,18 +1442,6 @@ struct hns_roce_eq_context { #define EQC_NEX_EQE_BA_H EQC_FIELD_LOC(339, 320) #define EQC_EQE_SIZE EQC_FIELD_LOC(341, 340) -#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0 -#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0) - -#define HNS_ROCE_V2_AEQE_EVENT_TYPE_S 0 -#define HNS_ROCE_V2_AEQE_EVENT_TYPE_M GENMASK(7, 0) - -#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8 -#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8) - -#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0 -#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M 
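The PF_CAPS_* conversions follow directly from the dword position of each legacy macro: absolute low bit = 32 * dword_index + old shift, absolute high bit = low bit + field width - 1. One worked example from the hunk above:

/* Legacy form, applied to the third __le32 of query_pf_caps_d:
 *	V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S	0
 *	V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M	GENMASK(21, 0)
 *
 * Struct-relative form:
 *	PF_CAPS_D_CEQ_DEPTH	PF_CAPS_D_FIELD_LOC(85, 64)
 *
 * since 2 * 32 + 0 = 64 and 2 * 32 + 21 = 85.
 */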
GENMASK(23, 0) - #define MAX_SERVICE_LEVEL 0x7 struct hns_roce_wqe_atomic_seg { @@ -1567,9 +1459,6 @@ struct hns_roce_sccc_clr_done { __le32 rsv[5]; }; -int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, - int *buffer); - static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2], void __iomem *dest) { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c deleted file mode 100644 index 5a97b5a0b7be..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -// Copyright (c) 2019 Hisilicon Limited. - -#include "hnae3.h" -#include "hns_roce_device.h" -#include "hns_roce_cmd.h" -#include "hns_roce_hw_v2.h" - -int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, - int *buffer) -{ - struct hns_roce_v2_cq_context *cq_context; - struct hns_roce_cmd_mailbox *mailbox; - int ret; - - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - cq_context = mailbox->buf; - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, - HNS_ROCE_CMD_QUERY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); - if (ret) { - dev_err(hr_dev->dev, "QUERY cqc cmd process error\n"); - goto err_mailbox; - } - - memcpy(buffer, cq_context, sizeof(*cq_context)); - -err_mailbox: - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - - return ret; -} diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 4194b626f3c6..dcf89689a4c6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -31,7 +31,6 @@ * SOFTWARE. */ #include <linux/acpi.h> -#include <linux/of_platform.h> #include <linux/module.h> #include <linux/pci.h> #include <rdma/ib_addr.h> @@ -70,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context) if (port >= hr_dev->caps.num_ports) return -EINVAL; - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr); + ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr); return ret; } @@ -84,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) if (port >= hr_dev->caps.num_ports) return -EINVAL; - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL); + ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL); return ret; } @@ -98,7 +97,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port, netdev = hr_dev->iboe.netdevs[port]; if (!netdev) { - dev_err(dev, "Can't find netdev on port(%u)!\n", port); + dev_err(dev, "can't find netdev on port(%u)!\n", port); return -ENODEV; } @@ -152,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev) u8 i; for (i = 0; i < hr_dev->caps.num_ports; i++) { - if (hr_dev->hw->set_mtu) - hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i], - hr_dev->caps.max_mtu); ret = hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr); if (ret) @@ -243,7 +239,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num, net_dev = hr_dev->iboe.netdevs[port]; if (!net_dev) { spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); - dev_err(dev, "Find netdev %u failed!\n", port); + dev_err(dev, "find netdev %u failed!\n", port); return -EINVAL; } @@ -270,6 +266,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index, u16 *pkey) { + if 
(index > 0) + return -EINVAL; + *pkey = PKEY_ID; return 0; @@ -307,9 +306,22 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, entry->address = address; entry->mmap_type = mmap_type; - ret = rdma_user_mmap_entry_insert_exact( - ucontext, &entry->rdma_entry, length, - mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1); + switch (mmap_type) { + /* pgoff 0 must be used by DB for compatibility */ + case HNS_ROCE_MMAP_TYPE_DB: + ret = rdma_user_mmap_entry_insert_exact( + ucontext, &entry->rdma_entry, length, 0); + break; + case HNS_ROCE_MMAP_TYPE_DWQE: + ret = rdma_user_mmap_entry_insert_range( + ucontext, &entry->rdma_entry, length, 1, + U32_MAX); + break; + default: + ret = -EINVAL; + break; + } + if (ret) { kfree(entry); return NULL; @@ -323,18 +335,12 @@ static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context) if (context->db_mmap_entry) rdma_user_mmap_entry_remove( &context->db_mmap_entry->rdma_entry); - - if (context->tptr_mmap_entry) - rdma_user_mmap_entry_remove( - &context->tptr_mmap_entry->rdma_entry); } static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) { struct hns_roce_ucontext *context = to_hr_ucontext(uctx); - struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); u64 address; - int ret; address = context->uar.pfn << PAGE_SHIFT; context->db_mmap_entry = hns_roce_user_mmap_entry_insert( @@ -342,27 +348,7 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) if (!context->db_mmap_entry) return -ENOMEM; - if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size) - return 0; - - /* - * FIXME: using io_remap_pfn_range on the dma address returned - * by dma_alloc_coherent is totally wrong. - */ - context->tptr_mmap_entry = - hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr, - hr_dev->tptr_size, - HNS_ROCE_MMAP_TYPE_TPTR); - if (!context->tptr_mmap_entry) { - ret = -ENOMEM; - goto err; - } - return 0; - -err: - hns_roce_dealloc_uar_entry(context); - return ret; } static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, @@ -436,10 +422,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) entry = to_hns_mmap(rdma_entry); pfn = entry->address >> PAGE_SHIFT; - prot = vma->vm_page_prot; - if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR) - prot = pgprot_noncached(prot); + switch (entry->mmap_type) { + case HNS_ROCE_MMAP_TYPE_DB: + case HNS_ROCE_MMAP_TYPE_DWQE: + prot = pgprot_device(vma->vm_page_prot); + break; + default: + return -EINVAL; + } ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE, prot, rdma_entry); @@ -524,7 +515,6 @@ static const struct ib_device_ops hns_roce_dev_ops = { .destroy_ah = hns_roce_destroy_ah, .destroy_cq = hns_roce_destroy_cq, .disassociate_ucontext = hns_roce_disassociate_ucontext, - .fill_res_cq_entry = hns_roce_fill_res_cq_entry, .get_dma_mr = hns_roce_get_dma_mr, .get_link_layer = hns_roce_get_link_layer, .get_port_immutable = hns_roce_port_immutable, @@ -575,6 +565,15 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = { INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd), }; +static const struct ib_device_ops hns_roce_dev_restrack_ops = { + .fill_res_cq_entry = hns_roce_fill_res_cq_entry, + .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw, + .fill_res_qp_entry = hns_roce_fill_res_qp_entry, + .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw, + .fill_res_mr_entry = hns_roce_fill_res_mr_entry, + .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw, +}; + static int hns_roce_register_device(struct hns_roce_dev *hr_dev) { 
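The mmap rework above encodes an ABI rule: the doorbell entry must stay at pgoff 0 because existing userspace mmaps that fixed offset, while direct-WQE entries may take any free offset in [1, U32_MAX] and userspace learns the key from the create-QP response. Condensed to its core, assuming a driver entry type wrapping the rdma core one:

#include <rdma/ib_verbs.h>

struct demo_mmap_entry {	/* hypothetical wrapper */
	struct rdma_user_mmap_entry rdma_entry;
};

static int demo_insert_entry(struct ib_ucontext *uctx,
			     struct demo_mmap_entry *e, size_t length,
			     bool is_db)
{
	if (is_db)	/* pgoff 0 is part of the user ABI */
		return rdma_user_mmap_entry_insert_exact(uctx,
				&e->rdma_entry, length, 0);

	/* anything else can live anywhere above the doorbell slot */
	return rdma_user_mmap_entry_insert_range(uctx, &e->rdma_entry,
						 length, 1, U32_MAX);
}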
int ret; @@ -614,6 +613,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops); ib_set_device_ops(ib_dev, &hns_roce_dev_ops); + ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops); for (i = 0; i < hr_dev->caps.num_ports; i++) { if (!hr_dev->iboe.netdevs[i]) continue; @@ -659,17 +659,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table, HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz, - hr_dev->caps.num_mtpts, 1); + hr_dev->caps.num_mtpts); if (ret) { - dev_err(dev, "Failed to init MTPT context memory, aborting.\n"); + dev_err(dev, "failed to init MTPT context memory, aborting.\n"); return ret; } ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table, HEM_TYPE_QPC, hr_dev->caps.qpc_sz, - hr_dev->caps.num_qps, 1); + hr_dev->caps.num_qps); if (ret) { - dev_err(dev, "Failed to init QP context memory, aborting.\n"); + dev_err(dev, "failed to init QP context memory, aborting.\n"); goto err_unmap_dmpt; } @@ -677,9 +677,9 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) HEM_TYPE_IRRL, hr_dev->caps.irrl_entry_sz * hr_dev->caps.max_qp_init_rdma, - hr_dev->caps.num_qps, 1); + hr_dev->caps.num_qps); if (ret) { - dev_err(dev, "Failed to init irrl_table memory, aborting.\n"); + dev_err(dev, "failed to init irrl_table memory, aborting.\n"); goto err_unmap_qp; } @@ -689,19 +689,19 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) HEM_TYPE_TRRL, hr_dev->caps.trrl_entry_sz * hr_dev->caps.max_qp_dest_rdma, - hr_dev->caps.num_qps, 1); + hr_dev->caps.num_qps); if (ret) { dev_err(dev, - "Failed to init trrl_table memory, aborting.\n"); + "failed to init trrl_table memory, aborting.\n"); goto err_unmap_irrl; } } ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table, HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz, - hr_dev->caps.num_cqs, 1); + hr_dev->caps.num_cqs); if (ret) { - dev_err(dev, "Failed to init CQ context memory, aborting.\n"); + dev_err(dev, "failed to init CQ context memory, aborting.\n"); goto err_unmap_trrl; } @@ -709,10 +709,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table, HEM_TYPE_SRQC, hr_dev->caps.srqc_entry_sz, - hr_dev->caps.num_srqs, 1); + hr_dev->caps.num_srqs); if (ret) { dev_err(dev, - "Failed to init SRQ context memory, aborting.\n"); + "failed to init SRQ context memory, aborting.\n"); goto err_unmap_cq; } } @@ -722,10 +722,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) &hr_dev->qp_table.sccc_table, HEM_TYPE_SCCC, hr_dev->caps.sccc_sz, - hr_dev->caps.num_qps, 1); + hr_dev->caps.num_qps); if (ret) { dev_err(dev, - "Failed to init SCC context memory, aborting.\n"); + "failed to init SCC context memory, aborting.\n"); goto err_unmap_srq; } } @@ -734,10 +734,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table, HEM_TYPE_QPC_TIMER, hr_dev->caps.qpc_timer_entry_sz, - hr_dev->caps.num_qpc_timer, 1); + hr_dev->caps.qpc_timer_bt_num); if (ret) { dev_err(dev, - "Failed to init QPC timer memory, aborting.\n"); + "failed to init QPC timer memory, aborting.\n"); goto err_unmap_ctx; } } @@ -746,10 +746,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table, HEM_TYPE_CQC_TIMER, hr_dev->caps.cqc_timer_entry_sz, - hr_dev->caps.num_cqc_timer, 1); + hr_dev->caps.cqc_timer_bt_num); if (ret) 
{ dev_err(dev, - "Failed to init CQC timer memory, aborting.\n"); + "failed to init CQC timer memory, aborting.\n"); goto err_unmap_qpc_timer; } } @@ -758,7 +758,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table, HEM_TYPE_GMV, hr_dev->caps.gmv_entry_sz, - hr_dev->caps.gmv_entry_num, 1); + hr_dev->caps.gmv_entry_num); if (ret) { dev_err(dev, "failed to init gmv table memory, ret = %d\n", @@ -816,7 +816,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) int ret; spin_lock_init(&hr_dev->sm_lock); - spin_lock_init(&hr_dev->bt_cmd_lock); if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) { @@ -828,13 +827,13 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar); if (ret) { - dev_err(dev, "Failed to allocate priv_uar.\n"); + dev_err(dev, "failed to allocate priv_uar.\n"); goto err_uar_table_free; } ret = hns_roce_init_qp_table(hr_dev); if (ret) { - dev_err(dev, "Failed to init qp_table.\n"); + dev_err(dev, "failed to init qp_table.\n"); goto err_uar_table_free; } @@ -847,9 +846,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) hns_roce_init_cq_table(hr_dev); - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) hns_roce_init_srq_table(hr_dev); - } return 0; @@ -907,26 +905,19 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) struct device *dev = hr_dev->dev; int ret; - if (hr_dev->hw->reset) { - ret = hr_dev->hw->reset(hr_dev, true); - if (ret) { - dev_err(dev, "Reset RoCE engine failed!\n"); - return ret; - } - } hr_dev->is_reset = false; if (hr_dev->hw->cmq_init) { ret = hr_dev->hw->cmq_init(hr_dev); if (ret) { - dev_err(dev, "Init RoCE Command Queue failed!\n"); - goto error_failed_cmq_init; + dev_err(dev, "init RoCE Command Queue failed!\n"); + return ret; } } ret = hr_dev->hw->hw_profile(hr_dev); if (ret) { - dev_err(dev, "Get RoCE engine profile failed!\n"); + dev_err(dev, "get RoCE engine profile failed!\n"); goto error_failed_cmd_init; } @@ -1003,12 +994,6 @@ error_failed_cmd_init: if (hr_dev->hw->cmq_exit) hr_dev->hw->cmq_exit(hr_dev); -error_failed_cmq_init: - if (hr_dev->hw->reset) { - if (hr_dev->hw->reset(hr_dev, false)) - dev_err(dev, "Dereset RoCE engine failed!\n"); - } - return ret; } @@ -1028,8 +1013,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev) hns_roce_cmd_cleanup(hr_dev); if (hr_dev->hw->cmq_exit) hr_dev->hw->cmq_exit(hr_dev); - if (hr_dev->hw->reset) - hr_dev->hw->reset(hr_dev, false); } MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 7089ac780291..845ac7d3831f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -31,7 +31,6 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <linux/vmalloc.h> #include <rdma/ib_umem.h> #include "hns_roce_device.h" @@ -48,24 +47,6 @@ unsigned long key_to_hw_index(u32 key) return (key << 24) | (key >> 8); } -static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) -{ - return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0, - HNS_ROCE_CMD_CREATE_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) -{ - return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0, - mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida; @@ -81,7 +62,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) return -ENOMEM; } - mr->key = hw_index_to_key(id); /* MR key */ + mr->key = hw_index_to_key(id); /* MR key */ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, (unsigned long)id); @@ -138,14 +119,13 @@ static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); } -static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, - struct hns_roce_mr *mr) +static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { struct ib_device *ibdev = &hr_dev->ib_dev; int ret; if (mr->enabled) { - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1)); if (ret) @@ -167,14 +147,11 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) { - ret = PTR_ERR(mailbox); - return ret; - } + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); if (mr->type != MR_TYPE_FRMR) - ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr, - mtpt_idx); + ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); else ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr); if (ret) { @@ -182,7 +159,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, goto err_page; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, mtpt_idx & (hr_dev->caps.num_mtpts - 1)); if (ret) { dev_err(dev, "failed to create mpt, ret = %d.\n", ret); @@ -213,7 +190,7 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) int ret; mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (mr == NULL) + if (!mr) return ERR_PTR(-ENOMEM); mr->type = MR_TYPE_DMA; @@ -272,7 +249,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto err_alloc_pbl; mr->ibmr.rkey = mr->ibmr.lkey = mr->key; - mr->ibmr.length = length; return &mr->ibmr; @@ -305,13 +281,14 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, return ERR_CAST(mailbox); mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1); - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0, - HNS_ROCE_CMD_QUERY_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT, + mtpt_idx); if (ret) goto free_cmd_mbox; - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, + 
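Across hns_roce_mr.c the old open-coded mailbox calls (with explicit op modifiers and HNS_ROCE_CMD_TIMEOUT_MSECS) collapse into hns_roce_create_hw_ctx()/hns_roce_destroy_hw_ctx(). Judging from the five-argument hns_roce_cmd_mbox() used elsewhere in this patch, the wrappers are presumably thin inlines along these lines (a sketch, not the literal driver code):

static inline int demo_create_hw_ctx(struct hns_roce_dev *hr_dev,
				     struct hns_roce_cmd_mailbox *mailbox,
				     u8 cmd, unsigned long idx)
{
	/* in_param carries the context buffer to write into the HW table */
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, cmd, idx);
}

static inline int demo_destroy_hw_ctx(struct hns_roce_dev *hr_dev, u8 cmd,
				      unsigned long idx)
{
	/* destroy needs no buffer, only the command and the table index */
	return hns_roce_cmd_mbox(hr_dev, 0, 0, cmd, idx);
}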
mtpt_idx); if (ret) ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret); @@ -341,7 +318,8 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, goto free_cmd_mbox; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx); + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, + mtpt_idx); if (ret) { ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret); goto free_cmd_mbox; @@ -361,16 +339,14 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); struct hns_roce_mr *mr = to_hr_mr(ibmr); - int ret = 0; - if (hr_dev->hw->dereg_mr) { - ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata); - } else { - hns_roce_mr_free(hr_dev, mr); - kfree(mr); - } + if (hr_dev->hw->dereg_mr) + hr_dev->hw->dereg_mr(hr_dev); - return ret; + hns_roce_mr_free(hr_dev, mr); + kfree(mr); + + return 0; } struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, @@ -486,7 +462,7 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, int ret; if (mw->enabled) { - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, key_to_hw_index(mw->rkey) & (hr_dev->caps.num_mtpts - 1)); if (ret) @@ -526,7 +502,7 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev, goto err_page; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, mtpt_idx & (hr_dev->caps.num_mtpts - 1)); if (ret) { dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret); @@ -609,15 +585,12 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, while (offset < end && npage < max_count) { count = 0; mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, - offset, &count, NULL); + offset, &count); if (!mtts) return -ENOBUFS; for (i = 0; i < count && npage < max_count; i++) { - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - addr = to_hr_hw_page_addr(pages[npage]); - else - addr = pages[npage]; + addr = pages[npage]; mtts[i] = cpu_to_le64(addr); npage++; @@ -824,11 +797,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, } int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) { struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; int mtt_count, left; - int start_index; + u32 start_index; int total = 0; __le64 *mtts; u32 npage; @@ -847,10 +820,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, continue; addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - mtt_buf[total] = to_hr_hw_page_addr(addr); - else - mtt_buf[total] = addr; + mtt_buf[total] = addr; total++; } @@ -864,7 +834,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, mtt_count = 0; mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, start_index + total, - &mtt_count, NULL); + &mtt_count); if (!mtts || !mtt_count) goto done; @@ -884,10 +854,10 @@ done: static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, struct hns_roce_buf_attr *attr, struct hns_roce_hem_cfg *cfg, - unsigned int *buf_page_shift, int unalinged_size) + unsigned int *buf_page_shift, u64 unalinged_size) { struct hns_roce_buf_region *r; - int first_region_padding; + u64 first_region_padding; int page_cnt, region_cnt; unsigned int page_shift; size_t buf_size; diff --git 
a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 81ffad77ae42..783e71852c50 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -30,7 +30,6 @@ * SOFTWARE. */ -#include <linux/platform_device.h> #include <linux/pci.h> #include "hns_roce_device.h" @@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) { struct hns_roce_ida *uar_ida = &hr_dev->uar_ida; - struct resource *res; int id; /* Using bitmap to manager UAR index */ @@ -104,18 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) else uar->index = 0; - if (!dev_is_pci(hr_dev->dev)) { - res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); - if (!res) { - ida_free(&uar_ida->ida, id); - dev_err(&hr_dev->pdev->dev, "memory resource not found!\n"); - return -EINVAL; - } - uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; - } else { - uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) - >> PAGE_SHIFT); - } + uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT); + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 9af4509894e6..f0bd82a18069 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -32,7 +32,6 @@ */ #include <linux/pci.h> -#include <linux/platform_device.h> #include <rdma/ib_addr.h> #include <rdma/ib_umem.h> #include <rdma/uverbs_ioctl.h> @@ -57,7 +56,7 @@ static void flush_work_handle(struct work_struct *work) if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); if (ret) - dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n", + dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n", ret); } @@ -106,16 +105,15 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) xa_unlock(&hr_dev->qp_table_xa); if (!qp) { - dev_warn(dev, "Async event for bogus QP %08x\n", qpn); + dev_warn(dev, "async event for bogus QP %08x\n", qpn); return; } - if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && - (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || - event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) { + if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || + event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) { qp->state = IB_QPS_ERR; flush_cqe(hr_dev, qp); @@ -219,14 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) int ret; if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { - /* when hw version is v1, the sqpn is allocated */ - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - num = HNS_ROCE_MAX_PORTS + - hr_dev->iboe.phy_port[hr_qp->port]; - else - num = 1; - - hr_qp->doorbell_qpn = 1; + num = 1; } else { mutex_lock(&qp_table->bank_mutex); bankid = get_least_load_bankid_for_qp(qp_table->bank); @@ -242,8 +233,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct 
hns_roce_qp *hr_qp) qp_table->bank[bankid].inuse++; mutex_unlock(&qp_table->bank_mutex); - - hr_qp->doorbell_qpn = (u32)num; } hr_qp->qpn = num; @@ -251,26 +240,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) return 0; } -enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) -{ - switch (state) { - case IB_QPS_RESET: - return HNS_ROCE_QP_STATE_RST; - case IB_QPS_INIT: - return HNS_ROCE_QP_STATE_INIT; - case IB_QPS_RTR: - return HNS_ROCE_QP_STATE_RTR; - case IB_QPS_RTS: - return HNS_ROCE_QP_STATE_RTS; - case IB_QPS_SQD: - return HNS_ROCE_QP_STATE_SQD; - case IB_QPS_ERR: - return HNS_ROCE_QP_STATE_ERR; - default: - return HNS_ROCE_QP_NUM_STATE; - } -} - static void add_qp_to_list(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_cq *send_cq, struct ib_cq *recv_cq) @@ -306,7 +275,7 @@ static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); if (ret) - dev_err(hr_dev->dev, "Failed to xa store for QPC\n"); + dev_err(hr_dev->dev, "failed to xa store for QPC\n"); else /* add QP to device's QP list for softwc */ add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, @@ -324,22 +293,17 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) if (!hr_qp->qpn) return -EINVAL; - /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ - if (hr_qp->ibqp.qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) - return 0; - /* Alloc memory for QPC */ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); if (ret) { - dev_err(dev, "Failed to get QPC table\n"); + dev_err(dev, "failed to get QPC table\n"); goto err_out; } /* Alloc memory for IRRL */ ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); if (ret) { - dev_err(dev, "Failed to get IRRL table\n"); + dev_err(dev, "failed to get IRRL table\n"); goto err_put_qp; } @@ -348,7 +312,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, hr_qp->qpn); if (ret) { - dev_err(dev, "Failed to get TRRL table\n"); + dev_err(dev, "failed to get TRRL table\n"); goto err_put_irrl; } } @@ -358,7 +322,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, hr_qp->qpn); if (ret) { - dev_err(dev, "Failed to get SCC CTX table\n"); + dev_err(dev, "failed to get SCC CTX table\n"); goto err_put_trrl; } } @@ -379,6 +343,11 @@ err_out: return ret; } +static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp) +{ + rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); +} + void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct xarray *xa = &hr_dev->qp_table_xa; @@ -402,11 +371,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ - if (hr_qp->ibqp.qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) - return; - if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); @@ -495,11 +459,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + hr_qp->rq.rsv_sge); - if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) - 
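alloc_qpn() above now gives the GSI QP the fixed QPN 1 and spreads ordinary QPs across banks, picking the bank with the fewest QPNs in use under bank_mutex to keep allocations balanced. The selection itself is a plain minimum scan; conceptually:

/* Hypothetical stand-in for get_least_load_bankid_for_qp(). */
static u8 demo_least_loaded_bank(const u32 *inuse, u8 nbanks)
{
	u8 best = 0;
	u8 i;

	for (i = 1; i < nbanks; i++)
		if (inuse[i] < inuse[best])
			best = i;

	return best;	/* caller increments inuse[best] under the lock */
}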
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); - else - hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * - hr_qp->rq.max_gs); + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * + hr_qp->rq.max_gs); hr_qp->rq.wqe_cnt = cnt; if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE && @@ -535,11 +496,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { - hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE; - return; - } - hr_qp->sq.max_gs = max(1U, cap->max_send_sge); wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp); @@ -780,7 +736,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, goto err_inline; } + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; + return 0; + err_inline: free_rq_inline_buf(hr_qp); @@ -822,6 +782,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, hns_roce_qp_has_rq(init_attr)); } +static int qp_mmap_entry(struct hns_roce_qp *hr_qp, + struct hns_roce_dev *hr_dev, + struct ib_udata *udata, + struct hns_roce_ib_create_qp_resp *resp) +{ + struct hns_roce_ucontext *uctx = + rdma_udata_to_drv_context(udata, + struct hns_roce_ucontext, ibucontext); + struct rdma_user_mmap_entry *rdma_entry; + u64 address; + + address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; + + hr_qp->dwqe_mmap_entry = + hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, + HNS_ROCE_DWQE_SIZE, + HNS_ROCE_MMAP_TYPE_DWQE); + + if (!hr_qp->dwqe_mmap_entry) { + ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); + return -ENOMEM; + } + + rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; + resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); + + return 0; +} + static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr, @@ -909,10 +898,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; if (udata) { + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { + ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp); + if (ret) + return ret; + } + ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, resp); if (ret) - return ret; + goto err_remove_qp; } else { ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr); if (ret) @@ -920,6 +915,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } return 0; + +err_remove_qp: + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + qp_user_mmap_entry_remove(hr_qp); + + return ret; } static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, @@ -933,6 +934,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hns_roce_db_unmap_user(uctx, &hr_qp->rdb); if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + qp_user_mmap_entry_remove(hr_qp); } else { if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) hns_roce_free_db(hr_dev, &hr_qp->rdb); @@ -1158,7 +1161,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, goto out; break; case IB_QPT_UD: - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && is_user) goto out; break; @@ -1200,7 +1203,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, ret = 
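qp_mmap_entry() above hands userspace an opaque offset (dwqe_mmap_key) instead of a raw address; the provider library is expected to pass that key back through mmap() on the uverbs file descriptor. A hedged userspace sketch: cmd_fd, the key argument, and the 64 KB size are assumptions standing in for the real provider plumbing and HNS_ROCE_DWQE_SIZE.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>

    /* Map the direct-WQE window the kernel advertised in the create-QP
     * response (sketch; error handling trimmed to the essentials). */
    static void *map_dwqe(int cmd_fd, uint64_t dwqe_mmap_key)
    {
        const size_t dwqe_size = 65536; /* assumed HNS_ROCE_DWQE_SIZE */
        void *p;

        p = mmap(NULL, dwqe_size, PROT_WRITE, MAP_SHARED, cmd_fd,
                 (off_t)dwqe_mmap_key);
        if (p == MAP_FAILED) {
            perror("mmap dwqe");
            return NULL;
        }
        return p;
    }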
hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); if (ret) - ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", + ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n", init_attr->qp_type, ret); return ret; @@ -1391,7 +1394,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, } } -static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset) +static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset) { return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); } diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 259444c0a630..989a2af2e938 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -9,112 +9,223 @@ #include "hns_roce_device.h" #include "hns_roce_hw_v2.h" -static int hns_roce_fill_cq(struct sk_buff *msg, - struct hns_roce_v2_cq_context *context) +#define MAX_ENTRY_NUM 256 + +int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq) { - if (rdma_nl_put_driver_u32(msg, "state", - roce_get_field(context->byte_4_pg_ceqn, - V2_CQC_BYTE_4_ARM_ST_M, - V2_CQC_BYTE_4_ARM_ST_S))) + struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); + struct nlattr *table_attr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + return -EMSGSIZE; + + if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth)) goto err; - if (rdma_nl_put_driver_u32(msg, "ceqn", - roce_get_field(context->byte_4_pg_ceqn, - V2_CQC_BYTE_4_CEQN_M, - V2_CQC_BYTE_4_CEQN_S))) + if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index)) goto err; - if (rdma_nl_put_driver_u32(msg, "cqn", - roce_get_field(context->byte_8_cqn, - V2_CQC_BYTE_8_CQN_M, - V2_CQC_BYTE_8_CQN_S))) + if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size)) goto err; - if (rdma_nl_put_driver_u32(msg, "hopnum", - roce_get_field(context->byte_16_hop_addr, - V2_CQC_BYTE_16_CQE_HOP_NUM_M, - V2_CQC_BYTE_16_CQE_HOP_NUM_S))) + if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn)) goto err; - if (rdma_nl_put_driver_u32( - msg, "pi", - roce_get_field(context->byte_28_cq_pi, - V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M, - V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S))) + nla_nest_end(msg, table_attr); + + return 0; + +err: + nla_nest_cancel(msg, table_attr); + + return -EMSGSIZE; +} + +int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); + struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); + struct hns_roce_v2_cq_context context; + u32 data[MAX_ENTRY_NUM] = {}; + int offset = 0; + int ret; + + if (!hr_dev->hw->query_cqc) + return -EINVAL; + + ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context); + if (ret) + return -EINVAL; + + data[offset++] = hr_reg_read(&context, CQC_CQ_ST); + data[offset++] = hr_reg_read(&context, CQC_SHIFT); + data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE); + data[offset++] = hr_reg_read(&context, CQC_CQE_CNT); + data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX); + data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX); + data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN); + data[offset++] = hr_reg_read(&context, CQC_ARM_ST); + data[offset++] = hr_reg_read(&context, CQC_CMD_SN); + data[offset++] = hr_reg_read(&context, CQC_CEQN); + data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT); + data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD); + data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM); + data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ); 
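The new raw CQ dump is a positional array of u32s: a consumer gets meaning purely from the order in which the driver wrote the fields. A sketch of a decoder for the first few words, assuming the netlink payload arrived intact in buf; the label list mirrors the write order in the hunk.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Head of the CQC raw layout, in the order written above. */
    static const char * const cqc_labels[] = {
        "cq_st", "shift", "cqe_size", "cqe_cnt",
        "producer_idx", "consumer_idx",
    };

    static void decode_cqc_raw(const void *buf, size_t len)
    {
        size_t i, n = len / sizeof(uint32_t);
        uint32_t v;

        for (i = 0; i < n && i < sizeof(cqc_labels) / sizeof(cqc_labels[0]); i++) {
            memcpy(&v, (const uint8_t *)buf + i * sizeof(v), sizeof(v));
            printf("%s = %u\n", cqc_labels[i], v);
        }
    }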
+ data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ); + + ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data); + + return ret; +} + +int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp) +{ + struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp); + struct nlattr *table_attr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + return -EMSGSIZE; + + if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt)) goto err; - if (rdma_nl_put_driver_u32( - msg, "ci", - roce_get_field(context->byte_32_cq_ci, - V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M, - V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S))) + if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs)) goto err; - if (rdma_nl_put_driver_u32( - msg, "coalesce", - roce_get_field(context->byte_56_cqe_period_maxcnt, - V2_CQC_BYTE_56_CQ_MAX_CNT_M, - V2_CQC_BYTE_56_CQ_MAX_CNT_S))) + if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt)) goto err; - if (rdma_nl_put_driver_u32( - msg, "period", - roce_get_field(context->byte_56_cqe_period_maxcnt, - V2_CQC_BYTE_56_CQ_PERIOD_M, - V2_CQC_BYTE_56_CQ_PERIOD_S))) + if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs)) goto err; - if (rdma_nl_put_driver_u32(msg, "cnt", - roce_get_field(context->byte_52_cqe_cnt, - V2_CQC_BYTE_52_CQE_CNT_M, - V2_CQC_BYTE_52_CQE_CNT_S))) + if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt)) goto err; + nla_nest_end(msg, table_attr); + return 0; err: + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; } -int hns_roce_fill_res_cq_entry(struct sk_buff *msg, - struct ib_cq *ib_cq) +int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp) { - struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); - struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); - struct hns_roce_v2_cq_context *context; - struct nlattr *table_attr; + struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp); + struct hns_roce_v2_qp_context context; + u32 data[MAX_ENTRY_NUM] = {}; + int offset = 0; int ret; - if (!hr_dev->dfx->query_cqc_info) + if (!hr_dev->hw->query_qpc) return -EINVAL; - context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context); + ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context); if (ret) - goto err; + return -EINVAL; + + data[offset++] = hr_reg_read(&context, QPC_QP_ST); + data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE); + data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG); + data[offset++] = hr_reg_read(&context, QPC_SRQ_EN); + data[offset++] = hr_reg_read(&context, QPC_SRQN); + data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD); + data[offset++] = hr_reg_read(&context, QPC_TX_CQN); + data[offset++] = hr_reg_read(&context, QPC_RX_CQN); + data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX); + data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX); + data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN); + data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX); + data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX); + data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT); + data[offset++] = hr_reg_read(&context, QPC_RQWS); + data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT); + data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT); + data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM); + data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM); + 
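The QP raw blob is position-dependent in the same way; nothing in the payload names the fields. One hedged way for a consumer to read it is an overlay struct whose member order tracks the write order above (first ten words shown; native endianness and no padding are assumed).

    #include <stdint.h>
    #include <string.h>

    /* Overlay for the head of the QPC raw dump; member order must match
     * the driver's write order exactly. */
    struct qpc_raw_head {
        uint32_t qp_st;
        uint32_t err_type;
        uint32_t check_flg;
        uint32_t srq_en;
        uint32_t srqn;
        uint32_t qkey_xrcd;
        uint32_t tx_cqn;
        uint32_t rx_cqn;
        uint32_t sq_producer_idx;
        uint32_t sq_consumer_idx;
    };

    static void parse_qpc_head(const void *payload, size_t len,
                               struct qpc_raw_head *out)
    {
        size_t n = len < sizeof(*out) ? len : sizeof(*out);

        memset(out, 0, sizeof(*out));
        memcpy(out, payload, n); /* a truncated dump leaves the tail zeroed */
    }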
data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM); + data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ); + data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ); + data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT); + data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT); + data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN); + data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN); + data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX); + data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX); + data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR); + data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR); + data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR); + data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR); + data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX); + data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR); + + ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data); + + return ret; +} + +int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr) +{ + struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr); + struct nlattr *table_attr; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); - if (!table_attr) { - ret = -EMSGSIZE; + if (!table_attr) + return -EMSGSIZE; + + if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num)) + goto err; + + if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift", + hr_mr->pbl_mtr.hem_cfg.ba_pg_shift)) goto err; - } - if (hns_roce_fill_cq(msg, context)) { - ret = -EMSGSIZE; - goto err_cancel_table; - } + if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift", + hr_mr->pbl_mtr.hem_cfg.buf_pg_shift)) + goto err; nla_nest_end(msg, table_attr); - kfree(context); return 0; -err_cancel_table: - nla_nest_cancel(msg, table_attr); err: - kfree(context); + nla_nest_cancel(msg, table_attr); + + return -EMSGSIZE; +} + +int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device); + struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr); + struct hns_roce_v2_mpt_entry context; + u32 data[MAX_ENTRY_NUM] = {}; + int offset = 0; + int ret; + + if (!hr_dev->hw->query_mpt) + return -EINVAL; + + ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context); + if (ret) + return -EINVAL; + + data[offset++] = hr_reg_read(&context, MPT_ST); + data[offset++] = hr_reg_read(&context, MPT_PD); + data[offset++] = hr_reg_read(&context, MPT_LKEY); + data[offset++] = hr_reg_read(&context, MPT_LEN_L); + data[offset++] = hr_reg_read(&context, MPT_LEN_H); + data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE); + data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM); + data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ); + data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ); + + ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data); + return ret; } diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index e64ef6903fb4..8dae98f827eb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -59,58 +59,39 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq, } } -static int hns_roce_hw_create_srq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long srq_num) +static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) { - return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0, - HNS_ROCE_CMD_CREATE_SRQ, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -static int 
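The MR dump splits the region length into two 32-bit words. Reassembly is a plain shift-or; the assumption (suggested by the field names, not stated in the hunk) is that MPT_LEN_H carries the upper half.

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild a 64-bit MR length from the two raw words (hedged:
     * assumes LEN_H is the high half). */
    static uint64_t mpt_len(uint32_t len_l, uint32_t len_h)
    {
        return ((uint64_t)len_h << 32) | len_l;
    }

    int main(void)
    {
        /* Example: a 6 GB region -> len_h = 1, len_l = 0x80000000. */
        printf("len = %llu\n",
               (unsigned long long)mpt_len(0x80000000u, 1));
        return 0;
    }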
hns_roce_hw_destroy_srq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long srq_num) -{ - return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num, - mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) -{ - struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_cmd_mailbox *mailbox; - int ret; int id; id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max, GFP_KERNEL); if (id < 0) { - ibdev_err(ibdev, "failed to alloc srq(%d).\n", id); + ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id); return -ENOMEM; } - srq->srqn = (unsigned long)id; - ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); - if (ret) { - ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret); - goto err_out; - } + srq->srqn = id; - ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); - if (ret) { - ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); - goto err_put; - } + return 0; +} + +static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +{ + ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn); +} + +static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_cmd_mailbox *mailbox; + int ret; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR_OR_NULL(mailbox)) { + if (IS_ERR(mailbox)) { ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n"); - ret = -ENOMEM; - goto err_xa; + return PTR_ERR(mailbox); } ret = hr_dev->hw->write_srqc(srq, mailbox->buf); @@ -119,24 +100,44 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) goto err_mbox; } - ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn); - if (ret) { + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ, + srq->srqn); + if (ret) ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret); - goto err_mbox; - } +err_mbox: hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + +static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +{ + struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; + struct ib_device *ibdev = &hr_dev->ib_dev; + int ret; + + ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); + if (ret) { + ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret); + return ret; + } + + ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); + if (ret) { + ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); + goto err_put; + } + + ret = hns_roce_create_srqc(hr_dev, srq); + if (ret) + goto err_xa; return 0; -err_mbox: - hns_roce_free_cmd_mailbox(hr_dev, mailbox); err_xa: xa_erase(&srq_table->xa, srq->srqn); err_put: hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); -err_out: - ida_free(&srq_ida->ida, id); return ret; } @@ -146,7 +147,8 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; int ret; - ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ, + srq->srqn); if (ret) dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", ret, srq->srqn); @@ -158,7 +160,6 @@ static void free_srqc(struct hns_roce_dev *hr_dev, 
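The alloc_srqc() refactor above is a textbook goto-unwind: each acquisition gets a label that releases everything acquired after it, and the mailbox lifetime is now confined to hns_roce_create_srqc(). A minimal standalone sketch of the pattern; step_a/b/c and the undo helpers are placeholders, not driver functions.

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; } /* fails: exercises the unwind */
    static void undo_b(void) { puts("undo b"); }
    static void undo_a(void) { puts("undo a"); }

    /* Acquire a, b, c in order; on failure release in reverse order,
     * as alloc_srqc() does with the table, xarray and context. */
    static int setup(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            return ret;

        ret = step_b();
        if (ret)
            goto err_a;

        ret = step_c();
        if (ret)
            goto err_b;

        return 0;

    err_b:
        undo_b();
    err_a:
        undo_a();
        return ret;
    }

    int main(void) { return setup() ? 1 : 0; }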
struct hns_roce_srq *srq) wait_for_completion(&srq->free); hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); - ida_free(&srq_table->srq_ida.ida, (int)srq->srqn); } static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, @@ -406,10 +407,14 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, if (ret) return ret; - ret = alloc_srqc(hr_dev, srq); + ret = alloc_srqn(hr_dev, srq); if (ret) goto err_srq_buf; + ret = alloc_srqc(hr_dev, srq); + if (ret) + goto err_srqn; + if (udata) { resp.srqn = srq->srqn; if (ib_copy_to_udata(udata, &resp, @@ -428,6 +433,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, err_srqc: free_srqc(hr_dev, srq); +err_srqn: + free_srqn(hr_dev, srq); err_srq_buf: free_srq_buf(hr_dev, srq); @@ -440,6 +447,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) struct hns_roce_srq *srq = to_hr_srq(ibsrq); free_srqc(hr_dev, srq); + free_srqn(hr_dev, srq); free_srq_buf(hr_dev, srq); return 0; }
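Splitting alloc_srqn() out of alloc_srqc() restores the usual mirror symmetry: create runs buffer -> SRQN -> SRQC, and destroy releases SRQC -> SRQN -> buffer, each free_*() undoing exactly one alloc_*() in reverse order. A compact sketch of that pairing; the resource names are reused for illustration only.

    #include <stdio.h>

    static void alloc_buf(void)  { puts("alloc buf");  }
    static void alloc_srqn(void) { puts("alloc srqn"); }
    static void alloc_srqc(void) { puts("alloc srqc"); }
    static void free_srqc(void)  { puts("free srqc");  }
    static void free_srqn(void)  { puts("free srqn");  }
    static void free_buf(void)   { puts("free buf");   }

    /* Construction order... */
    static void create_srq(void)
    {
        alloc_buf();
        alloc_srqn();
        alloc_srqc();
    }

    /* ...and teardown strictly reverses it, so each helper only has to
     * know about its own resource. */
    static void destroy_srq(void)
    {
        free_srqc();
        free_srqn();
        free_buf();
    }

    int main(void)
    {
        create_srq();
        destroy_srq();
        return 0;
    }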