Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_cq.c')
 drivers/infiniband/hw/hns/hns_roce_cq.c | 300 ++++++++++++----------------
 1 file changed, 126 insertions(+), 174 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 22541d19cd09..af1d8823b3f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,8 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
- struct ib_cq *ibcq = &hr_cq->ib_cq;
-
- ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
- enum hns_roce_event event_type)
-{
- struct hns_roce_dev *hr_dev;
- struct ib_event event;
- struct ib_cq *ibcq;
-
- ibcq = &hr_cq->ib_cq;
- hr_dev = to_hr_dev(ibcq->device);
-
- if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(hr_dev->dev,
- "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
- event_type, hr_cq->cqn);
- return;
- }
-
- if (ibcq->event_handler) {
- event.device = ibcq->device;
- event.event = IB_EVENT_CQ_ERR;
- event.element.cq = ibcq;
- ibcq->event_handler(&event, ibcq->cq_context);
- }
-}
-
-static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
- HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
- struct hns_roce_mtt *hr_mtt,
- struct hns_roce_cq *hr_cq, int vector)
+static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_table *mtt_table;
@@ -101,35 +58,32 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
else
mtt_table = &hr_dev->mr_table.mtt_table;
- mtts = hns_roce_table_find(hr_dev, mtt_table,
- hr_mtt->first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
- return -EINVAL;
- }
+ mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
+ &dma_handle);
- if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "CQ alloc.Invalid vector.\n");
+ if (!mtts) {
+ dev_err(dev, "Failed to find mtt for CQ buf.\n");
return -EINVAL;
}
- hr_cq->vector = vector;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
- if (ret == -1) {
- dev_err(dev, "CQ alloc.Failed to alloc index.\n");
- return -ENOMEM;
+ if (ret) {
+ dev_err(dev, "Num of CQ out of range.\n");
+ return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+ dev_err(dev,
+ "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "CQ alloc failed xa_store.\n");
+ dev_err(dev, "Failed to xa_store CQ.\n");
goto err_put;
}
@@ -140,14 +94,16 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
goto err_xa;
}
- hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
- nent, vector);
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
/* Send mailbox to hw */
- ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+ HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+ dev_err(dev,
+ "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_xa;
}
@@ -170,24 +126,17 @@ err_out:
return ret;
}
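/*
 * Editorial note: hns_roce_alloc_cqc() above unwinds with the usual
 * kernel goto ladder -- each failure releases only what was acquired
 * before it, in reverse order (xa_store -> table_get -> bitmap_alloc).
 * A minimal standalone sketch of that shape, with invented stub
 * functions (illustration only, not driver code):
 */
#include <stdio.h>

static int get_a(void) { return 0; }           /* e.g. hns_roce_bitmap_alloc() */
static int get_b(void) { return 0; }           /* e.g. hns_roce_table_get() */
static int get_c(void) { return -1; }          /* e.g. mailbox cmd, fails here */
static void put_b(void) { puts("undo b"); }
static void put_a(void) { puts("undo a"); }

static int alloc_pattern(void)
{
	int ret;

	ret = get_a();
	if (ret)
		return ret;

	ret = get_b();
	if (ret)
		goto err_a;

	ret = get_c();
	if (ret)
		goto err_b;

	return 0;

err_b:
	put_b();
err_a:
	put_a();
	return ret;
}

int main(void)
{
	/* prints "undo b", then "undo a": teardown mirrors setup */
	printf("alloc_pattern() = %d\n", alloc_pattern());
	return 0;
}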
-static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
int ret;
- ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+ HNS_ROCE_CMD_DESTROY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+ dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
xa_erase(&cq_table->array, hr_cq->cqn);
@@ -204,103 +153,91 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
- struct ib_udata *udata,
- struct hns_roce_cq_buf *buf,
- struct ib_umem **umem, u64 buf_addr, int cqe)
+static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct hns_roce_ib_create_cq ucmd,
+ struct ib_udata *udata)
{
- int ret;
- u32 page_shift;
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
+ struct ib_umem **umem = &hr_cq->umem;
u32 npages;
+ int ret;
- *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
- IB_ACCESS_LOCAL_WRITE, 1);
+ *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
-
- if (hr_dev->caps.cqe_buf_pg_sz) {
- npages = (ib_umem_page_count(*umem) +
- (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.cqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
- &buf->hr_mtt);
- } else {
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
- PAGE_SHIFT, &buf->hr_mtt);
- }
+ mtt->mtt_type = MTT_TYPE_WQE;
+
+ npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
+ 1 << hr_dev->caps.cqe_buf_pg_sz);
+ ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
ib_umem_release(*umem);
return ret;
}
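/*
 * Editorial note: the hunk above folds the cqe_buf_pg_sz branches into
 * one DIV_ROUND_UP(). The kernel defines DIV_ROUND_UP(n, d) as
 * (((n) + (d) - 1) / (d)), so for a shift of 0 the divisor is 1 and the
 * result is ib_umem_page_count() unchanged -- the old special case was
 * redundant. Standalone check with sample values (not driver code):
 */
#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int shifts[] = { 0, 1, 4 };	/* sample cqe_buf_pg_sz values */
	unsigned int pages, i;

	for (pages = 1; pages <= 64; pages++) {
		for (i = 0; i < 3; i++) {
			unsigned int per_buf_page = 1U << shifts[i];
			unsigned int open_coded =
				(pages + per_buf_page - 1) / per_buf_page;

			assert(open_coded == DIV_ROUND_UP(pages, per_buf_page));
		}
	}
	printf("DIV_ROUND_UP matches the removed open-coded round-up\n");
	return 0;
}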
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, u32 nent)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
int ret;
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- (1 << page_shift) * 2, &buf->hr_buf,
- page_shift);
+ ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
+ buf, buf->page_shift);
if (ret)
goto out;
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+ mtt->mtt_type = MTT_TYPE_WQE;
- ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
- buf->hr_buf.page_shift, &buf->hr_mtt);
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+ ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
- hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, buf->size, buf);
+
out:
return ret;
}
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, int cqe)
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
static int create_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp,
- int cq_entries)
+ struct hns_roce_ib_create_cq_resp *resp)
{
struct hns_roce_ib_create_cq ucmd;
struct device *dev = hr_dev->dev;
@@ -314,9 +251,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
}
/* Get user space address, write it into mtt table */
- ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
- &hr_cq->umem, ucmd.buf_addr,
- cq_entries);
+ ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
return ret;
@@ -337,17 +272,16 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, int cq_entries)
+ struct hns_roce_cq *hr_cq)
{
struct device *dev = hr_dev->dev;
- struct hns_roce_uar *uar;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
@@ -361,15 +295,14 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
}
/* Init mtt table and write buff address to mtt table */
- ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+ ret = alloc_cq_buf(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db;
}
- uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * uar->index;
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
return 0;
@@ -392,64 +325,69 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(context, &hr_cq->db);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct device *dev = hr_dev->dev;
int vector = attr->comp_vector;
- int cq_entries = attr->cqe;
+ u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
+ dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
- if (hr_dev->caps.min_cqes)
- cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+ dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
- cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
- hr_cq->ib_cq.cqe = cq_entries - 1;
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+ hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
+ ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
if (ret) {
dev_err(dev, "Create cq failed in user mode!\n");
goto err_cq;
}
} else {
- ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
+ ret = create_kernel_cq(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Create cq failed in kernel mode!\n");
goto err_cq;
}
}
- /* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- hr_cq, vector);
+ ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
if (ret) {
- dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
+ dev_err(dev, "Alloc CQ failed(%d).\n", ret);
goto err_dbmap;
}
@@ -462,11 +400,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
- /* Get created cq handler and carry out event */
- hr_cq->comp = hns_roce_ib_cq_comp;
- hr_cq->event = hns_roce_ib_cq_event;
- hr_cq->cq_depth = cq_entries;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -477,7 +410,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
return 0;
err_cqc:
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
err_dbmap:
if (udata)
@@ -489,7 +422,7 @@ err_cq:
return ret;
}
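/*
 * Editorial note: hns_roce_create_cq() now derives the CQ geometry up
 * front: clamp attr->cqe to at least caps.min_cqes, round up to a power
 * of two, then size the buffer from caps.cq_entry_sz. Standalone sketch
 * of that arithmetic; the capability values below are invented examples,
 * not real hardware limits:
 */
#include <stdio.h>

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow_of_two_u32(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int min_cqes = 64;	/* assumed caps.min_cqes */
	unsigned int cq_entry_sz = 32;	/* assumed caps.cq_entry_sz */
	unsigned int requested = 100;	/* attr->cqe from the caller */

	unsigned int depth = requested > min_cqes ? requested : min_cqes;

	depth = roundup_pow_of_two_u32(depth);	/* 100 -> 128 */

	printf("cq_depth  = %u\n", depth);			/* 128 */
	printf("ib_cq.cqe = %u (last valid cqe index)\n", depth - 1);
	printf("buf.size  = %u bytes\n", depth * cq_entry_sz);	/* 4096 */
	return 0;
}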
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -499,8 +432,8 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
return;
}
- hns_roce_free_cq(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_free_cqc(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (udata) {
@@ -512,7 +445,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
&hr_cq->db);
} else {
/* Free the buff of stored cq */
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
@@ -520,38 +453,57 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_cq *ibcq;
- cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
- if (!cq) {
- dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ cqn);
return;
}
- ++cq->arm_sn;
- cq->comp(cq);
+ ++hr_cq->arm_sn;
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->comp_handler)
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
}
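/*
 * Editorial note: both handlers index the xarray with
 * cqn & (num_cqs - 1). That is the usual power-of-two trick: when
 * num_cqs is a power of two, masking equals cqn % num_cqs. Standalone
 * check with an invented table size (not driver code):
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_cqs = 1024;	/* assumed power-of-two capability */
	unsigned int cqn;

	for (cqn = 0; cqn < 4 * num_cqs; cqn++)
		assert((cqn & (num_cqs - 1)) == (cqn % num_cqs));

	printf("mask and modulo agree for power-of-two num_cqs\n");
	return 0;
}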
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
- struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_event event;
+ struct ib_cq *ibcq;
- cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
- if (!cq) {
- dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ event_type, cqn);
return;
}
- cq->event(cq, (enum hns_roce_event)event_type);
+ atomic_inc(&hr_cq->refcount);
+
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
}
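/*
 * Editorial note: the event path pins the CQ with atomic_inc() before
 * calling the consumer and drops it with atomic_dec_and_test(); the
 * final put fires complete(&hr_cq->free) so a concurrent destroy can
 * wait out in-flight events. Userspace sketch of that shape using C11
 * atomics as stand-ins for atomic_t/struct completion (not driver code):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_cq {
	atomic_int refcount;
	bool freed;		/* stands in for complete(&cq->free) */
};

static void event_path(struct fake_cq *cq)
{
	atomic_fetch_add(&cq->refcount, 1);	/* pin across the callback */
	/* ... deliver IB_EVENT_CQ_ERR to ibcq->event_handler here ... */
	if (atomic_fetch_sub(&cq->refcount, 1) == 1)	/* dec_and_test */
		cq->freed = true;
}

int main(void)
{
	struct fake_cq cq = { .refcount = 1, .freed = false };	/* creation ref */

	event_path(&cq);
	printf("freed after event:   %d\n", cq.freed);	/* 0: destroy ref held */

	if (atomic_fetch_sub(&cq.refcount, 1) == 1)	/* destroy drops last ref */
		cq.freed = true;
	printf("freed after destroy: %d\n", cq.freed);	/* 1 */
	return 0;
}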
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)