55 files changed, 435 insertions(+), 603 deletions(-)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 0c98dd3dee67..9194a23cf859 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -955,7 +955,7 @@ int rdma_query_gid(struct ib_device *device, u32 port_num, { struct ib_gid_table *table; unsigned long flags; - int res = -EINVAL; + int res; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; @@ -963,9 +963,15 @@ int rdma_query_gid(struct ib_device *device, u32 port_num, table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); - if (index < 0 || index >= table->sz || - !is_gid_entry_valid(table->data_vec[index])) + if (index < 0 || index >= table->sz) { + res = -EINVAL; goto done; + } + + if (!is_gid_entry_valid(table->data_vec[index])) { + res = -ENOENT; + goto done; + } memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid)); res = 0; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 835ac54d4a24..27a00ce2e101 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -766,6 +766,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) unsigned int p; u16 pkey, index; enum ib_port_state port_state; + int ret; int i; cma_dev = NULL; @@ -784,9 +785,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) continue; - for (i = 0; !rdma_query_gid(cur_dev->device, - p, i, &gid); - i++) { + + for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; + ++i) { + ret = rdma_query_gid(cur_dev->device, p, i, + &gid); + if (ret) + continue; + if (!memcmp(&gid, dgid, sizeof(gid))) { cma_dev = cur_dev; sgid = gid; @@ -4033,8 +4039,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); - req.private_data_len = offset + conn_param->private_data_len; - if (req.private_data_len < conn_param->private_data_len) + if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { @@ -4093,8 +4098,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); - req.private_data_len = offset + conn_param->private_data_len; - if (req.private_data_len < conn_param->private_data_len) + if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 22a4adda7981..a311df07b1bd 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2461,7 +2461,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, ++i) { ret = rdma_query_gid(device, port, i, &tmp_gid); if (ret) - return ret; + continue; + if (!memcmp(&tmp_gid, gid, sizeof *gid)) { *port_num = port; if (index) diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 7a47343d11f9..aead24c1a682 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -227,7 +227,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, const struct mmu_interval_notifier_ops *ops) { struct ib_umem_odp *umem_odp; - struct mm_struct *mm; int ret; if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND))) @@ -241,7 +240,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, umem_odp->umem.length = size; 
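/*
 * A minimal sketch of the check_add_overflow() idiom adopted by the two
 * cma.c hunks above. The old code detected wrap-around after the fact
 * ("req.private_data_len < conn_param->private_data_len"); the helper
 * from <linux/overflow.h> performs the addition and the overflow check
 * in one step. Types and names below are hypothetical stand-ins for the
 * real request fields, not the actual cma.c code.
 */
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int sketch_total_private_data_len(unsigned int offset,
					 u8 conn_data_len, u8 *total)
{
	/*
	 * check_add_overflow() stores offset + conn_data_len in *total
	 * and returns true if the sum does not fit the destination
	 * type, so a truncated length is rejected instead of silently
	 * used for the allocation that follows.
	 */
	if (check_add_overflow(offset, conn_data_len, total))
		return -EINVAL;
	return 0;
}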
umem_odp->umem.address = addr; umem_odp->umem.writable = ib_access_writable(access); - umem_odp->umem.owning_mm = mm = current->mm; + umem_odp->umem.owning_mm = current->mm; umem_odp->notifier.ops = ops; umem_odp->page_shift = PAGE_SHIFT; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index d1345d76d9b1..6b6393176b3c 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1399,7 +1399,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs, attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; attr.qp_type = cmd->qp_type; - attr.create_flags = 0; attr.cap.max_send_wr = cmd->max_send_wr; attr.cap.max_recv_wr = cmd->max_recv_wr; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 29cc0d14399a..3224f18a66e5 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -262,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str) int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index, u16 *pkey) { - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + if (index > 0) + return -EINVAL; - /* Ignore port_num */ + *pkey = IB_DEFAULT_PKEY_FULL; - memset(pkey, 0, sizeof(*pkey)); - return bnxt_qplib_get_pkey(&rdev->qplib_res, - &rdev->qplib_res.pkey_tbl, index, pkey); + return 0; } int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index b44944fb9b24..3d6834d3d4fb 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -893,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq, qplib_srq); struct ib_event ib_event; - int rc = 0; ib_event.device = &srq->rdev->ibdev; ib_event.element.srq = &srq->ib_srq; @@ -907,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, (*srq->ib_srq.event_handler)(&ib_event, srq->ib_srq.srq_context); } - return rc; + return 0; } static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index ca88849559bf..96e581ced50e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -46,6 +46,7 @@ #include <linux/delay.h> #include <linux/prefetch.h> #include <linux/if_ether.h> +#include <rdma/ib_mad.h> #include "roce_hsi.h" @@ -1232,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; struct creq_modify_qp_resp resp; - u16 cmd_flags = 0, pkey; + u16 cmd_flags = 0; u32 temp32[4]; u32 bmask; int rc; @@ -1255,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) req.access = qp->access; - if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) { - if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl, - qp->pkey_index, &pkey)) - req.pkey = cpu_to_le16(pkey); - } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) + req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) req.qkey = cpu_to_le32(qp->qkey); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 3de854727460..061b2895dd9b 100644 --- 
a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -555,7 +555,7 @@ skip_ctx_setup: void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - kfree(rcfw->cmdq.cmdq_bitmap); + bitmap_free(rcfw->cmdq.cmdq_bitmap); kfree(rcfw->qp_tbl); kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); @@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_sg_info sginfo = {}; struct bnxt_qplib_cmdq_ctx *cmdq; struct bnxt_qplib_creq_ctx *creq; - u32 bmap_size = 0; rcfw->pdev = res->pdev; cmdq = &rcfw->cmdq; @@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, if (!rcfw->crsqe_tbl) goto fail; - bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); - cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); + cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL); if (!cmdq->cmdq_bitmap) goto fail; - cmdq->bmap_size = bmap_size; - /* Allocate one extra to hold the QP1 entries */ rcfw->qp_tbl_size = qp_tbl_sz + 1; rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), @@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) iounmap(cmdq->cmdq_mbox.reg.bar_reg); iounmap(creq->creq_db.reg.bar_reg); - indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); - if (indx != cmdq->bmap_size) + indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); + if (indx != rcfw->cmdq_depth) dev_err(&rcfw->pdev->dev, "disabling RCFW with pending cmd-bit %lx\n", indx); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 82faa4e4cda8..0a3d8e7da3d4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx { wait_queue_head_t waitq; unsigned long flags; unsigned long *cmdq_bitmap; - u32 bmap_size; u32 seq_num; }; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index bc1ba4b51ba4..126d4f26f75a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -649,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); } -static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - if (!pkey_tbl->tbl) - dev_dbg(&res->pdev->dev, "PKEY tbl not present\n"); - else - kfree(pkey_tbl->tbl); - - pkey_tbl->tbl = NULL; - pkey_tbl->max = 0; - pkey_tbl->active = 0; -} - -static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, - u16 max) -{ - pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL); - if (!pkey_tbl->tbl) - return -ENOMEM; - - pkey_tbl->max = max; - return 0; -}; - /* PDs */ int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) { @@ -843,24 +818,6 @@ unmap_io: return -ENOMEM; } -/* PKEYs */ -static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); - pkey_tbl->active = 0; -} - -static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - u16 pkey = 0xFFFF; - - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); - - /* pkey default = 0xFFFF */ - bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false); -} - /* Stats */ static void 
bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, struct bnxt_qplib_stats *stats) @@ -891,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) { - bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl); bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); } int bnxt_qplib_init_res(struct bnxt_qplib_res *res) { bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); - bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl); return 0; } void bnxt_qplib_free_res(struct bnxt_qplib_res *res) { - bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl); bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); bnxt_qplib_free_pd_tbl(&res->pd_tbl); bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); @@ -924,10 +878,6 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, if (rc) goto fail; - rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey); - if (rc) - goto fail; - rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd); if (rc) goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index e1411a2352a7..982e2c96dac2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl { u8 *vlan; }; -struct bnxt_qplib_pkey_tbl { - u16 *tbl; - u16 max; - u16 active; -}; - struct bnxt_qplib_dpi { u32 dpi; void __iomem *dbr; @@ -258,7 +252,6 @@ struct bnxt_qplib_res { struct bnxt_qplib_rcfw *rcfw; struct bnxt_qplib_pd_tbl pd_tbl; struct bnxt_qplib_sgid_tbl sgid_tbl; - struct bnxt_qplib_pkey_tbl pkey_tbl; struct bnxt_qplib_dpi_tbl dpi_tbl; bool prio; bool is_vf; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 379e715ebd30..b802981b7171 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -146,17 +146,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_srq = le16_to_cpu(sb->max_srq); attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1; attr->max_srq_sges = sb->max_srq_sge; - attr->max_pkey = le32_to_cpu(sb->max_pkeys); - /* - * Some versions of FW reports more than 0xFFFF. - * Restrict it for now to 0xFFFF to avoid - * reporting trucated value - */ - if (attr->max_pkey > 0xFFFF) { - /* ib_port_attr::pkey_tbl_len is u16 */ - attr->max_pkey = 0xFFFF; - } - + attr->max_pkey = 1; attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); @@ -414,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return rc; } -/* pkeys */ -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey) -{ - if (index == 0xFFFF) { - *pkey = 0xFFFF; - return 0; - } - if (index >= pkey_tbl->max) { - dev_err(&res->pdev->dev, - "Index %d exceeded PKEY table max (%d)\n", - index, pkey_tbl->max); - return -EINVAL; - } - memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey)); - return 0; -} - -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? 
*/ - if (!pkey_tbl->active) { - dev_err(&res->pdev->dev, "PKEY table has no active entries\n"); - return -ENOMEM; - } - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - break; - } - if (i == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY 0x%04x not found in the pkey table\n", *pkey); - return -ENOMEM; - } - memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey)); - pkey_tbl->active--; - - /* unlock */ - return rc; -} - -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, free_idx, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? */ - if (pkey_tbl->active == pkey_tbl->max) { - dev_err(&res->pdev->dev, "PKEY table is full\n"); - return -ENOMEM; - } - free_idx = pkey_tbl->max; - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - return -EALREADY; - else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max) - free_idx = i; - } - if (free_idx == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY table is FULL but count is not MAX??\n"); - return -ENOMEM; - } - /* Add PKEY to the pkey_tbl */ - memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey)); - pkey_tbl->active++; - - /* unlock */ - return rc; -} - /* AH */ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, bool block) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index a18f568cb23e..5939e8fc8353 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -255,15 +255,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac); -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey); -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr, bool vf); int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res, diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c index 724d23297b35..f64e7e02b129 100644 --- a/drivers/infiniband/hw/cxgb4/id_table.c +++ b/drivers/infiniband/hw/cxgb4/id_table.c @@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc) alloc->last = obj + 1; if (alloc->last >= alloc->max) alloc->last = 0; - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj += alloc->start; } else obj = -1; @@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj) obj -= alloc->start; spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); spin_unlock_irqrestore(&alloc->lock, flags); } int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, u32 reserved, u32 flags) { - int i; - alloc->start = start; alloc->flags = flags; if (flags & C4IW_ID_TABLE_F_RANDOM) alloc->last = prandom_u32() % RANDOM_SKIP; else alloc->last = 0; - alloc->max = num; + alloc->max = num; spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + 
alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY)) - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void c4iw_id_table_free(struct c4iw_id_table *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 0c8fd5a85fcb..89f36a3a9af0 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -41,6 +41,7 @@ #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/inetdevice.h> +#include <net/addrconf.h> #include <linux/io.h> #include <asm/irq.h> @@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro return -EINVAL; dev = to_c4iw_dev(ibdev); - memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + dev->rdev.lldi.ports[0]->dev_addr); props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); props->fw_ver = dev->rdev.lldi.fw_vers; props->device_cap_flags = dev->device_cap_flags; @@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work) struct c4iw_dev *dev = ctx->dev; pr_debug("c4iw_dev %p\n", dev); - memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); - memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->rdev.lldi.ports[0]->dev_addr); dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; if (fastreg_support) dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 5b11c8282744..a71c5a36ceba 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, if (!pq->reqs) goto pq_reqs_nomem; - pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), - sizeof(*pq->req_in_use), - GFP_KERNEL); + pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL); if (!pq->req_in_use) goto pq_reqs_no_in_use; @@ -210,7 +208,7 @@ cq_comps_nomem: cq_nomem: kmem_cache_destroy(pq->txreq_cache); pq_txreq_nomem: - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); pq_reqs_no_in_use: kfree(pq->reqs); pq_reqs_nomem: @@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, pq->wait, !atomic_read(&pq->n_reqs)); kfree(pq->reqs); - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); flush_pq_iowait(pq); kfree(pq); diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 84f3f2b5f097..3f7fb7508585 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -61,7 +61,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, CMD_POLL_TOKEN, 0); if (ret) { dev_err_ratelimited(hr_dev->dev, - "failed to post mailbox %x in poll mode, ret = %d.\n", + "failed to post mailbox 0x%x in poll mode, ret = %d.\n", op, ret); return ret; } @@ -91,7 +91,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, if (unlikely(token != context->token)) { dev_err_ratelimited(hr_dev->dev, - "[cmd] invalid ae token %x,context token is %x!\n", + "[cmd] invalid 
ae token 0x%x, context token is 0x%x.\n", token, context->token); return; } @@ -130,14 +130,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, context->token, 1); if (ret) { dev_err_ratelimited(dev, - "failed to post mailbox %x in event mode, ret = %d.\n", + "failed to post mailbox 0x%x in event mode, ret = %d.\n", op, ret); goto out; } if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", context->token, op); ret = -EBUSY; goto out; @@ -145,7 +145,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ret = context->result; if (ret) - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", context->token, op, ret); out: diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 43e17d61cb63..bc7112a205a7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -182,6 +182,7 @@ enum { HNS_ROCE_CAP_FLAG_FRMR = BIT(8), HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), + HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12), HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14), HNS_ROCE_CAP_FLAG_STASH = BIT(17), }; @@ -228,6 +229,7 @@ struct hns_roce_uar { enum hns_roce_mmap_type { HNS_ROCE_MMAP_TYPE_DB = 1, HNS_ROCE_MMAP_TYPE_TPTR, + HNS_ROCE_MMAP_TYPE_DWQE, }; struct hns_user_mmap_entry { @@ -354,10 +356,10 @@ struct hns_roce_mr { u64 size; /* Address range of MR */ u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ - u32 access; /* Access permission of MR */ + u32 access; /* Access permission of MR */ int enabled; /* MR's active status */ - int type; /* MR's register type */ - u32 pbl_hop_num; /* multi-hop number */ + int type; /* MR's register type */ + u32 pbl_hop_num; /* multi-hop number */ struct hns_roce_mtr pbl_mtr; u32 npages; dma_addr_t *page_list; @@ -374,17 +376,17 @@ struct hns_roce_wq { u32 wqe_cnt; /* WQE num */ u32 max_gs; u32 rsv_sge; - int offset; - int wqe_shift; /* WQE size */ + u32 offset; + u32 wqe_shift; /* WQE size */ u32 head; u32 tail; void __iomem *db_reg; }; struct hns_roce_sge { - unsigned int sge_cnt; /* SGE num */ - int offset; - int sge_shift; /* SGE size */ + unsigned int sge_cnt; /* SGE num */ + u32 offset; + u32 sge_shift; /* SGE size */ }; struct hns_roce_buf_list { @@ -468,7 +470,7 @@ struct hns_roce_cq { struct hns_roce_idx_que { struct hns_roce_mtr mtr; - int entry_shift; + u32 entry_shift; unsigned long *bitmap; u32 head; u32 tail; @@ -480,7 +482,7 @@ struct hns_roce_srq { u32 wqe_cnt; int max_gs; u32 rsv_sge; - int wqe_shift; + u32 wqe_shift; u32 cqn; u32 xrcdn; void __iomem *db_reg; @@ -627,10 +629,6 @@ struct hns_roce_work { u32 queue_num; }; -enum { - HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5), -}; - struct hns_roce_qp { struct ib_qp ibqp; struct hns_roce_wq rq; @@ -672,9 +670,10 @@ struct hns_roce_qp { unsigned long flush_flag; struct hns_roce_work flush_work; struct hns_roce_rinl_buf rq_inl_buf; - struct list_head node; /* all qps are on a list */ - struct list_head rq_node; /* all recv qps are on a list */ - struct list_head sq_node; /* all send qps are on a list */ + struct list_head node; /* all qps are on a list */ + struct list_head rq_node; /* all recv qps are on a list */ + struct list_head sq_node; /* all send qps are on a list */ + struct 
hns_user_mmap_entry *dwqe_mmap_entry; }; struct hns_roce_ib_iboe { @@ -767,7 +766,7 @@ struct hns_roce_caps { u32 reserved_qps; int num_qpc_timer; int num_cqc_timer; - int num_srqs; + u32 num_srqs; u32 max_wqes; u32 max_srq_wrs; u32 max_srq_sges; @@ -781,7 +780,7 @@ struct hns_roce_caps { u32 min_cqes; u32 min_wqes; u32 reserved_cqs; - int reserved_srqs; + u32 reserved_srqs; int num_aeq_vectors; int num_comp_vectors; int num_other_vectors; @@ -855,7 +854,7 @@ struct hns_roce_caps { u32 cqc_timer_ba_pg_sz; u32 cqc_timer_buf_pg_sz; u32 cqc_timer_hop_num; - u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ u32 cqe_buf_pg_sz; u32 cqe_hop_num; u32 srqwqe_ba_pg_sz; @@ -874,7 +873,7 @@ struct hns_roce_caps { u32 gmv_hop_num; u32 sl_num; u32 llm_buf_pg_sz; - u32 chunk_sz; /* chunk size in non multihop mode */ + u32 chunk_sz; /* chunk size in non multihop mode */ u64 flags; u16 default_ceq_max_cnt; u16 default_ceq_period; @@ -1001,8 +1000,8 @@ struct hns_roce_dev { int loop_idc; u32 sdb_offset; u32 odb_offset; - dma_addr_t tptr_dma_addr; /* only for hw v1 */ - u32 tptr_size; /* only for hw v1 */ + dma_addr_t tptr_dma_addr; /* only for hw v1 */ + u32 tptr_size; /* only for hw v1 */ const struct hns_roce_hw *hw; void *priv; struct workqueue_struct *irq_workq; @@ -1010,6 +1009,7 @@ struct hns_roce_dev { u32 func_num; u32 is_vf; u32 cong_algo_tmpl_id; + u64 dwqe_page; }; static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) @@ -1158,7 +1158,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev); /* hns roce hw need current block and next block addr from mtt */ #define MTT_MIN_COUNT 2 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct hns_roce_buf_attr *buf_attr, unsigned int page_shift, struct ib_udata *udata, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index bbfa1332dedc..e681c2dc23e8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -678,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val, static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, void *wqe) { +#define HNS_ROCE_SL_SHIFT 2 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; /* All kinds of DirectWQE have the same header field layout */ @@ -685,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl); roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2); + V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, + qp->sl >> HNS_ROCE_SL_SHIFT); roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head); @@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, continue; dev_err_ratelimited(hr_dev->dev, - "Cmdq IO error, opcode = %x, return = %x\n", + "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", desc->opcode, desc_ret); ret = -EIO; } } else { /* FW/HW reset or incorrect number of desc */ tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG); - dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n", + 
dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n", csq->head, tail); csq->head = tail; @@ -1997,7 +1999,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { - caps->flags |= HNS_ROCE_CAP_FLAG_STASH; + caps->flags |= HNS_ROCE_CAP_FLAG_STASH | + HNS_ROCE_CAP_FLAG_DIRECT_WQE; caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; } else { caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; @@ -4733,7 +4736,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { ibdev_err(ibdev, - "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", + "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", hr_qp->sl, MAX_SERVICE_LEVEL); return -EINVAL; } @@ -4762,7 +4765,8 @@ static bool check_qp_state(enum ib_qp_state cur_state, [IB_QPS_ERR] = true }, [IB_QPS_SQD] = {}, [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } + [IB_QPS_ERR] = { [IB_QPS_RESET] = true, + [IB_QPS_ERR] = true } }; return sm[cur_state][new_state]; @@ -5827,7 +5831,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag); } -static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) +static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) { struct device *dev = hr_dev->dev; int ret; @@ -5841,7 +5845,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) 0, HNS_ROCE_CMD_DESTROY_AEQC, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) - dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); + dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); } static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 4d904d5e82be..fddb9bc3c14c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -35,26 +35,15 @@ #include <linux/bitops.h> -#define HNS_ROCE_VF_QPC_BT_NUM 256 -#define HNS_ROCE_VF_SCCC_BT_NUM 64 -#define HNS_ROCE_VF_SRQC_BT_NUM 64 -#define HNS_ROCE_VF_CQC_BT_NUM 64 -#define HNS_ROCE_VF_MPT_BT_NUM 64 -#define HNS_ROCE_VF_SMAC_NUM 32 -#define HNS_ROCE_VF_SL_NUM 8 -#define HNS_ROCE_VF_GMV_BT_NUM 256 - #define HNS_ROCE_V2_MAX_QP_NUM 0x1000 #define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_SRQ 0x100000 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 -#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 @@ -63,13 +52,10 @@ #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 #define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_PHY_UAR_NUM 1 -#define HNS_ROCE_V2_MAX_IRQ_NUM 65 -#define HNS_ROCE_V2_COMP_VEC_NUM 63 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 -#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 @@ 
-81,7 +67,6 @@ #define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16 #define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 -#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 @@ -103,7 +88,6 @@ #define HNS_ROCE_INVALID_LKEY 0x0 #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 -#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 @@ -1441,7 +1425,7 @@ struct hns_roce_v2_priv { struct hns_roce_dip { u8 dgid[GID_LEN_V2]; u32 dip_idx; - struct list_head node; /* all dips are on a list */ + struct list_head node; /* all dips are on a list */ }; #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 4194b626f3c6..d0b976a86cd5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -270,6 +270,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index, u16 *pkey) { + if (index > 0) + return -EINVAL; + *pkey = PKEY_ID; return 0; @@ -307,9 +310,25 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, entry->address = address; entry->mmap_type = mmap_type; - ret = rdma_user_mmap_entry_insert_exact( - ucontext, &entry->rdma_entry, length, - mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1); + switch (mmap_type) { + case HNS_ROCE_MMAP_TYPE_DB: + ret = rdma_user_mmap_entry_insert_exact( + ucontext, &entry->rdma_entry, length, 0); + break; + case HNS_ROCE_MMAP_TYPE_TPTR: + ret = rdma_user_mmap_entry_insert_exact( + ucontext, &entry->rdma_entry, length, 1); + break; + case HNS_ROCE_MMAP_TYPE_DWQE: + ret = rdma_user_mmap_entry_insert_range( + ucontext, &entry->rdma_entry, length, 2, + U32_MAX); + break; + default: + ret = -EINVAL; + break; + } + if (ret) { kfree(entry); return NULL; @@ -436,10 +455,18 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) entry = to_hns_mmap(rdma_entry); pfn = entry->address >> PAGE_SHIFT; - prot = vma->vm_page_prot; - if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR) - prot = pgprot_noncached(prot); + switch (entry->mmap_type) { + case HNS_ROCE_MMAP_TYPE_DB: + case HNS_ROCE_MMAP_TYPE_DWQE: + prot = pgprot_device(vma->vm_page_prot); + break; + case HNS_ROCE_MMAP_TYPE_TPTR: + prot = vma->vm_page_prot; + break; + default: + return -EINVAL; + } ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE, prot, rdma_entry); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 7089ac780291..8de899372567 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -81,7 +81,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) return -ENOMEM; } - mr->key = hw_index_to_key(id); /* MR key */ + mr->key = hw_index_to_key(id); /* MR key */ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, (unsigned long)id); @@ -824,11 +824,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, } int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) { struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; int mtt_count, 
left; - int start_index; + u32 start_index; int total = 0; __le64 *mtts; u32 npage; @@ -884,10 +884,10 @@ done: static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, struct hns_roce_buf_attr *attr, struct hns_roce_hem_cfg *cfg, - unsigned int *buf_page_shift, int unalinged_size) + unsigned int *buf_page_shift, u64 unalinged_size) { struct hns_roce_buf_region *r; - int first_region_padding; + u64 first_region_padding; int page_cnt, region_cnt; unsigned int page_shift; size_t buf_size; diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 81ffad77ae42..03c349f7ebbe 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -115,6 +115,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) } else { uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT); + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_dev->dwqe_page = + pci_resource_start(hr_dev->pci_dev, 4); } return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 9af4509894e6..c84e1c23722c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -379,6 +379,11 @@ err_out: return ret; } +static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp) +{ + rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); +} + void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct xarray *xa = &hr_dev->qp_table_xa; @@ -780,7 +785,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, goto err_inline; } + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; + return 0; + err_inline: free_rq_inline_buf(hr_qp); @@ -822,6 +831,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, hns_roce_qp_has_rq(init_attr)); } +static int qp_mmap_entry(struct hns_roce_qp *hr_qp, + struct hns_roce_dev *hr_dev, + struct ib_udata *udata, + struct hns_roce_ib_create_qp_resp *resp) +{ + struct hns_roce_ucontext *uctx = + rdma_udata_to_drv_context(udata, + struct hns_roce_ucontext, ibucontext); + struct rdma_user_mmap_entry *rdma_entry; + u64 address; + + address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; + + hr_qp->dwqe_mmap_entry = + hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, + HNS_ROCE_DWQE_SIZE, + HNS_ROCE_MMAP_TYPE_DWQE); + + if (!hr_qp->dwqe_mmap_entry) { + ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); + return -ENOMEM; + } + + rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; + resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); + + return 0; +} + static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr, @@ -909,10 +947,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; if (udata) { + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { + ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp); + if (ret) + return ret; + } + ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, resp); if (ret) - return ret; + goto err_remove_qp; } else { ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr); if (ret) @@ -920,6 +964,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } return 0; + +err_remove_qp: + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + 
qp_user_mmap_entry_remove(hr_qp); + + return ret; } static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, @@ -933,6 +983,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hns_roce_db_unmap_user(uctx, &hr_qp->rdb); if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + qp_user_mmap_entry_remove(hr_qp); } else { if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) hns_roce_free_db(hr_dev, &hr_qp->rdb); @@ -1391,7 +1443,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, } } -static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset) +static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset) { return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); } diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h index aa20827dcc9d..d0d4f2b77d34 100644 --- a/drivers/infiniband/hw/irdma/pble.h +++ b/drivers/infiniband/hw/irdma/pble.h @@ -69,7 +69,7 @@ struct irdma_add_page_info { struct irdma_chunk { struct list_head list; struct irdma_dma_info dmainfo; - void *bitmapbuf; + unsigned long *bitmapbuf; u32 sizeofbitmap; u64 size; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 8cd5f9261692..456fed94b145 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev, return -EINVAL; memset(props, 0, sizeof(*props)); - ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + iwdev->netdev->dev_addr); props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | irdma_fw_minor_ver(&rf->sc_dev); props->device_cap_flags = iwdev->device_cap_flags; @@ -4321,24 +4322,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, return IB_LINK_LAYER_ETHERNET; } -static __be64 irdma_mac_to_guid(struct net_device *ndev) -{ - const unsigned char *mac = ndev->dev_addr; - __be64 guid; - unsigned char *dst = (unsigned char *)&guid; - - dst[0] = mac[0] ^ 2; - dst[1] = mac[1]; - dst[2] = mac[2]; - dst[3] = 0xff; - dst[4] = 0xfe; - dst[5] = mac[3]; - dst[6] = mac[4]; - dst[7] = mac[5]; - - return guid; -} - static const struct ib_device_ops irdma_roce_dev_ops = { .attach_mcast = irdma_attach_mcast, .create_ah = irdma_create_ah, @@ -4408,7 +4391,8 @@ static const struct ib_device_ops irdma_dev_ops = { static void irdma_init_roce_device(struct irdma_device *iwdev) { iwdev->ibdev.node_type = RDMA_NODE_IB_CA; - iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev); + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, + iwdev->netdev->dev_addr); ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); } @@ -4421,7 +4405,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev) struct net_device *netdev = iwdev->netdev; iwdev->ibdev.node_type = RDMA_NODE_RNIC; - ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr); + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, + netdev->dev_addr); iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref; iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref; iwdev->ibdev.ops.iw_get_qp = irdma_get_qp; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0d2fa3338784..d66ce7694bbe 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2784,10 +2784,8 @@ static void *mlx4_ib_add(struct 
mlx4_dev *dev) if (err) goto err_counter; - ibdev->ib_uc_qpns_bitmap = - kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count), - sizeof(long), - GFP_KERNEL); + ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, + GFP_KERNEL); if (!ibdev->ib_uc_qpns_bitmap) goto err_steer_qp_release; @@ -2875,7 +2873,7 @@ err_diag_counters: mlx4_ib_diag_cleanup(ibdev); err_steer_free_bitmap: - kfree(ibdev->ib_uc_qpns_bitmap); + bitmap_free(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: mlx4_qp_release_range(dev, ibdev->steer_qpn_base, @@ -2988,7 +2986,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); - kfree(ibdev->ib_uc_qpns_bitmap); + bitmap_free(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index aef1d274a14e..9f0f79d02d3c 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc) } if (obj < alloc->max) { - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj |= alloc->top; } else obj = -1; @@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); alloc->last = min(alloc->last, obj); alloc->top = (alloc->top + alloc->max) & alloc->mask; @@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, u32 reserved) { - int i; - /* num must be a power of 2 */ if (num != 1 << (ffs(num) - 1)) return -EINVAL; @@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, alloc->max = num; alloc->mask = mask; spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void mthca_alloc_cleanup(struct mthca_alloc *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } /* diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index ce0e0867e488..a59100c496b4 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) return -1; found: - clear_bit(seg, buddy->bits[o]); + __clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; - set_bit(seg ^ 1, buddy->bits[o]); + __set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } @@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { - clear_bit(seg ^ 1, buddy->bits[order]); + __clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } - set_bit(seg, buddy->bits[order]); + __set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); @@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) { - int i, s; + int i; 
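/*
 * The mlx4, mthca, hfi1, ocrdma and pvrdma hunks in this series all make
 * the same conversion: open-coded kmalloc_array(BITS_TO_LONGS(n),
 * sizeof(long)) plus bitmap_zero()/set_bit() loops become the
 * <linux/bitmap.h> helpers, and atomic set_bit()/clear_bit() calls
 * become the non-atomic __set_bit()/__clear_bit(), which is safe here
 * because every caller already serializes under the allocator spinlock.
 * A minimal sketch of the resulting pattern, using a hypothetical
 * id-table struct rather than any driver's real one:
 */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sketch_id_table {
	spinlock_t lock;
	unsigned long *table;	/* one bit per id */
};

static int sketch_id_table_init(struct sketch_id_table *t, u32 num,
				u32 reserved)
{
	spin_lock_init(&t->lock);
	/* replaces kmalloc_array(BITS_TO_LONGS(num), ...) + bitmap_zero() */
	t->table = bitmap_zalloc(num, GFP_KERNEL);
	if (!t->table)
		return -ENOMEM;
	/* replaces a for-loop of set_bit() over the reserved ids */
	bitmap_set(t->table, 0, reserved);
	return 0;
}

static void sketch_id_table_cleanup(struct sketch_id_table *t)
{
	bitmap_free(t->table);	/* pairs with bitmap_zalloc() */
}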
buddy->max_order = max_order; spin_lock_init(&buddy->lock); @@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL); + buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i), + GFP_KERNEL); if (!buddy->bits[i]) goto err_out_free; - bitmap_zero(buddy->bits[i], - 1 << (buddy->max_order - i)); } - set_bit(0, buddy->bits[buddy->max_order]); + __set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + bitmap_free(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + bitmap_free(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index c51c3f40700e..265a581133dc 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) { int status = -ENOMEM; - size_t pd_bitmap_size; struct ocrdma_alloc_pd_range *cmd; struct ocrdma_alloc_pd_range_rsp *rsp; @@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_dpp_pd = rsp->pd_count; - pd_bitmap_size = - BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); } @@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_normal_pd = rsp->pd_count; - pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); @@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) { ocrdma_mbx_dealloc_pd_range(dev); - kfree(dev->pd_mgr->pd_norm_bitmap); - kfree(dev->pd_mgr->pd_dpp_bitmap); + bitmap_free(dev->pd_mgr->pd_norm_bitmap); + bitmap_free(dev->pd_mgr->pd_dpp_bitmap); kfree(dev->pd_mgr); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7abf6cf1e937..5d4b3bc16493 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("Dual BSD/GPL"); -void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) -{ - u8 mac_addr[6]; - - memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); - guid[0] = mac_addr[0] ^ 2; - guid[1] = mac_addr[1]; - guid[2] = mac_addr[2]; - guid[3] = 0xff; - guid[4] = 0xfe; - guid[5] = mac_addr[3]; - guid[6] = mac_addr[4]; - guid[7] = mac_addr[5]; -} static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, 
u32 port_num) { @@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) { int ret; - ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->nic_info.mac_addr); BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX); memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, sizeof(OCRDMA_NODE_DESC)); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 735123d0e9ec..bfa7aad92ead 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -41,6 +41,7 @@ */ #include <linux/dma-mapping.h> +#include <net/addrconf.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> @@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, memset(attr, 0, sizeof *attr); memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); - ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, + dev->nic_info.mac_addr); attr->max_mr_size = dev->attr.max_mr_size; attr->page_size_cap = 0xffff000; attr->vendor_id = dev->nic_info.pdev->vendor; @@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) { u16 pd_bitmap_idx = 0; - const unsigned long *pd_bitmap; + unsigned long *pd_bitmap; if (dpp_pool) { pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_dpp_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_dpp_count++; if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; @@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) pd_bitmap = dev->pd_mgr->pd_norm_bitmap; pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_normal_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_norm_count++; if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b73d742a520c..f860b7fcef33 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port, enum rdma_protocol_type ocrdma_query_protocol(struct ib_device *device, u32 port_num); -void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9100009f0a23..a53476653b0d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1931,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* db offset was calculated in copy_qp_uresp, now set in the user q */ if (qedr_qp_has_sq(qp)) { qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; + qp->sq.max_wr = attrs->cap.max_send_wr; rc = qedr_db_recovery_add(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data, DB_REC_WIDTH_32B, @@ -1941,6 +1942,7 @@ 
static int qedr_create_user_qp(struct qedr_dev *dev, if (qedr_qp_has_rq(qp)) { qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; + qp->rq.max_wr = attrs->cap.max_recv_wr; rc = qedr_db_recovery_add(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data, DB_REC_WIDTH_32B, diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 756a83bcff58..5a0e26cd648e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index, int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct usnic_ib_pd *pd = to_upd(ibpd); - void *umem_pd; - umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); - if (IS_ERR_OR_NULL(umem_pd)) { - return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM; - } + pd->umem_pd = usnic_uiom_alloc_pd(); + if (IS_ERR(pd->umem_pd)) + return PTR_ERR(pd->umem_pd); return 0; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c index bf51357ea3aa..9a4de962e947 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c @@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev) tbl->max = num; tbl->mask = mask; spin_lock_init(&tbl->lock); - tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL); + tbl->table = bitmap_zalloc(num, GFP_KERNEL); if (!tbl->table) return -ENOMEM; /* 0th UAR is taken by the device. */ - set_bit(0, tbl->table); + __set_bit(0, tbl->table); return 0; } @@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev) { struct pvrdma_id_table *tbl = &dev->uar_table.tbl; - kfree(tbl->table); + bitmap_free(tbl->table); } int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) @@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) return -ENOMEM; } - set_bit(obj, tbl->table); + __set_bit(obj, tbl->table); obj |= tbl->top; spin_unlock_irqrestore(&tbl->lock, flags); @@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) obj = uar->index & (tbl->max - 1); spin_lock_irqsave(&tbl->lock, flags); - clear_bit(obj, tbl->table); + __clear_bit(obj, tbl->table); tbl->last = min(tbl->last, obj); tbl->top = (tbl->top + tbl->max) & tbl->mask; spin_unlock_irqrestore(&tbl->lock, flags); diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index d771ba8449a1..f363fe3fa414 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -458,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - unsigned long flags; - if (wqe->has_rd_atomic) { wqe->has_rd_atomic = 0; atomic_inc(&qp->req.rd_atomic); @@ -472,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, if (unlikely(qp->req.state == QP_STATE_DRAIN)) { /* state_lock used by requester & completer */ - spin_lock_irqsave(&qp->state_lock, flags); + spin_lock_bh(&qp->state_lock); if ((qp->req.state == QP_STATE_DRAIN) && (qp->comp.psn == qp->req.psn)) { qp->req.state = QP_STATE_DRAINED; - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -488,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, qp->ibqp.qp_context); } } else { - 
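/*
 * The rxe hunks around this point drop the irqsave/irqrestore spinlock
 * variants in favour of spin_lock_bh()/spin_unlock_bh(). A minimal
 * sketch of the reasoning, assuming (as the completer/tasklet code in
 * these hunks suggests) the lock is taken only from process and softirq
 * context, never from hard-irq context; names here are illustrative:
 */
#include <linux/spinlock.h>

static void sketch_drain_check(spinlock_t *state_lock, int *state)
{
	/*
	 * spin_lock_bh() disables bottom halves (softirqs) on the local
	 * CPU, which is enough to exclude a tasklet such as rxe's
	 * completer from preempting this section; the on-stack "flags"
	 * word required by spin_lock_irqsave() is no longer needed.
	 */
	spin_lock_bh(state_lock);
	if (*state == 1)	/* e.g. QP_STATE_DRAIN */
		*state = 2;	/* e.g. QP_STATE_DRAINED */
	spin_unlock_bh(state_lock);
}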
spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); } } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index 6848426c074f..6baaaa34458e 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -42,14 +42,13 @@ err1: static void rxe_send_complete(struct tasklet_struct *t) { struct rxe_cq *cq = from_tasklet(cq, t, comp_task); - unsigned long flags; - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); if (cq->is_dying) { - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); return; } - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } @@ -106,15 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) { struct ib_event ev; - unsigned long flags; int full; void *addr; - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT); if (unlikely(full)) { - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); if (cq->ibcq.event_handler) { ev.device = cq->ibcq.device; ev.element.cq = &cq->ibcq; @@ -130,7 +128,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT); - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); if ((cq->notify == IB_CQ_NEXT_COMP) || (cq->notify == IB_CQ_SOLICITED && solicited)) { @@ -143,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) void rxe_cq_disable(struct rxe_cq *cq) { - unsigned long flags; - - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); cq->is_dying = true; - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); } -void rxe_cq_cleanup(struct rxe_pool_entry *arg) +void rxe_cq_cleanup(struct rxe_pool_elem *elem) { - struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem); + struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); if (cq->queue) rxe_queue_cleanup(cq->queue); diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 1ca43b859d80..b1e174afb1d4 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited); void rxe_cq_disable(struct rxe_cq *cq); -void rxe_cq_cleanup(struct rxe_pool_entry *arg); +void rxe_cq_cleanup(struct rxe_pool_elem *arg); /* rxe_mcast.c */ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, @@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, void rxe_drop_all_mcast_groups(struct rxe_qp *qp); -void rxe_mc_cleanup(struct rxe_pool_entry *arg); +void rxe_mc_cleanup(struct rxe_pool_elem *arg); /* rxe_mmap.c */ struct rxe_mmap_info { @@ -89,7 +89,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey); int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe); int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr); int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); -void rxe_mr_cleanup(struct rxe_pool_entry *arg); +void rxe_mr_cleanup(struct rxe_pool_elem *arg); /* rxe_mw.c */ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); @@ -97,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw); int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe); int 
rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey); struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey); -void rxe_mw_cleanup(struct rxe_pool_entry *arg); +void rxe_mw_cleanup(struct rxe_pool_elem *arg); /* rxe_net.c */ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, @@ -131,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp); void rxe_qp_destroy(struct rxe_qp *qp); -void rxe_qp_cleanup(struct rxe_pool_entry *arg); +void rxe_qp_cleanup(struct rxe_pool_elem *elem); static inline int qp_num(struct rxe_qp *qp) { diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c index 1c1d1b53312d..bd1ac88b8700 100644 --- a/drivers/infiniband/sw/rxe/rxe_mcast.c +++ b/drivers/infiniband/sw/rxe/rxe_mcast.c @@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, int err; struct rxe_mc_grp *grp; struct rxe_pool *pool = &rxe->mc_grp_pool; - unsigned long flags; if (rxe->attr.max_mcast_qp_attach == 0) return -EINVAL; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); grp = rxe_pool_get_key_locked(pool, mgid); if (grp) @@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, grp = create_grp(rxe, pool, mgid); if (IS_ERR(grp)) { - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); err = PTR_ERR(grp); return err; } done: - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); *grp_p = grp; return 0; } @@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp) } } -void rxe_mc_cleanup(struct rxe_pool_entry *arg) +void rxe_mc_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem); + struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem); struct rxe_dev *rxe = grp->rxe; rxe_drop_key(grp); diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 53271df10e47..25c78aade822 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) static void rxe_mr_init(int access, struct rxe_mr *mr) { - u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1); + u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1); u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0; /* set ibmr->l/rkey and also copy into private l/rkey @@ -699,9 +699,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) return 0; } -void rxe_mr_cleanup(struct rxe_pool_entry *arg) +void rxe_mr_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem); + struct rxe_mr *mr = container_of(elem, typeof(*mr), elem); ib_umem_release(mr->umem); diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c index 9534a7fe1a98..32dd8c0b8b9e 100644 --- a/drivers/infiniband/sw/rxe/rxe_mw.c +++ b/drivers/infiniband/sw/rxe/rxe_mw.c @@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) } rxe_add_index(mw); - mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1); + mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? 
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID; spin_lock_init(&mw->lock); @@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw) { struct rxe_mw *mw = to_rmw(ibmw); struct rxe_pd *pd = to_rpd(ibmw->pd); - unsigned long flags; - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); rxe_do_dealloc_mw(mw); - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); rxe_drop_ref(mw); rxe_drop_ref(pd); @@ -197,7 +196,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) struct rxe_dev *rxe = to_rdev(qp->ibqp.device); u32 mw_rkey = wqe->wr.wr.mw.mw_rkey; u32 mr_lkey = wqe->wr.wr.mw.mr_lkey; - unsigned long flags; mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8); if (unlikely(!mw)) { @@ -225,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) mr = NULL; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_bind_mw(qp, wqe, mw, mr); if (ret) @@ -233,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) rxe_do_bind_mw(qp, wqe, mw, mr); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); err_drop_mr: if (mr) rxe_drop_ref(mr); @@ -280,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw) int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); - unsigned long flags; struct rxe_mw *mw; int ret; @@ -295,7 +292,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) goto err_drop_ref; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_invalidate_mw(qp, mw); if (ret) @@ -303,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) rxe_do_invalidate_mw(mw); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); err_drop_ref: rxe_drop_ref(mw); err: @@ -333,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) return mw; } -void rxe_mw_cleanup(struct rxe_pool_entry *elem) +void rxe_mw_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem); + struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); rxe_drop_index(mw); } diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index 2e80bb6aa957..4cb003885e00 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -5,13 +5,14 @@ */ #include "rxe.h" -#include "rxe_loc.h" + +#define RXE_POOL_ALIGN (16) static const struct rxe_type_info { const char *name; size_t size; size_t elem_offset; - void (*cleanup)(struct rxe_pool_entry *obj); + void (*cleanup)(struct rxe_pool_elem *obj); enum rxe_pool_flags flags; u32 min_index; u32 max_index; @@ -21,19 +22,19 @@ static const struct rxe_type_info { [RXE_TYPE_UC] = { .name = "rxe-uc", .size = sizeof(struct rxe_ucontext), - .elem_offset = offsetof(struct rxe_ucontext, pelem), + .elem_offset = offsetof(struct rxe_ucontext, elem), .flags = RXE_POOL_NO_ALLOC, }, [RXE_TYPE_PD] = { .name = "rxe-pd", .size = sizeof(struct rxe_pd), - .elem_offset = offsetof(struct rxe_pd, pelem), + .elem_offset = offsetof(struct rxe_pd, elem), .flags = RXE_POOL_NO_ALLOC, }, [RXE_TYPE_AH] = { .name = "rxe-ah", .size = sizeof(struct rxe_ah), - .elem_offset = offsetof(struct rxe_ah, pelem), + .elem_offset = offsetof(struct rxe_ah, elem), .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_AH_INDEX, .max_index = RXE_MAX_AH_INDEX, @@ -41,7 +42,7 @@ static const struct rxe_type_info { [RXE_TYPE_SRQ] = { .name = "rxe-srq", .size = 
sizeof(struct rxe_srq), - .elem_offset = offsetof(struct rxe_srq, pelem), + .elem_offset = offsetof(struct rxe_srq, elem), .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_SRQ_INDEX, .max_index = RXE_MAX_SRQ_INDEX, @@ -49,7 +50,7 @@ static const struct rxe_type_info { [RXE_TYPE_QP] = { .name = "rxe-qp", .size = sizeof(struct rxe_qp), - .elem_offset = offsetof(struct rxe_qp, pelem), + .elem_offset = offsetof(struct rxe_qp, elem), .cleanup = rxe_qp_cleanup, .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_QP_INDEX, @@ -58,14 +59,14 @@ static const struct rxe_type_info { [RXE_TYPE_CQ] = { .name = "rxe-cq", .size = sizeof(struct rxe_cq), - .elem_offset = offsetof(struct rxe_cq, pelem), + .elem_offset = offsetof(struct rxe_cq, elem), .flags = RXE_POOL_NO_ALLOC, .cleanup = rxe_cq_cleanup, }, [RXE_TYPE_MR] = { .name = "rxe-mr", .size = sizeof(struct rxe_mr), - .elem_offset = offsetof(struct rxe_mr, pelem), + .elem_offset = offsetof(struct rxe_mr, elem), .cleanup = rxe_mr_cleanup, .flags = RXE_POOL_INDEX, .min_index = RXE_MIN_MR_INDEX, @@ -74,7 +75,7 @@ static const struct rxe_type_info { [RXE_TYPE_MW] = { .name = "rxe-mw", .size = sizeof(struct rxe_mw), - .elem_offset = offsetof(struct rxe_mw, pelem), + .elem_offset = offsetof(struct rxe_mw, elem), .cleanup = rxe_mw_cleanup, .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_MW_INDEX, @@ -83,7 +84,7 @@ static const struct rxe_type_info { [RXE_TYPE_MC_GRP] = { .name = "rxe-mc_grp", .size = sizeof(struct rxe_mc_grp), - .elem_offset = offsetof(struct rxe_mc_grp, pelem), + .elem_offset = offsetof(struct rxe_mc_grp, elem), .cleanup = rxe_mc_cleanup, .flags = RXE_POOL_KEY, .key_offset = offsetof(struct rxe_mc_grp, mgid), @@ -92,15 +93,10 @@ static const struct rxe_type_info { [RXE_TYPE_MC_ELEM] = { .name = "rxe-mc_elem", .size = sizeof(struct rxe_mc_elem), - .elem_offset = offsetof(struct rxe_mc_elem, pelem), + .elem_offset = offsetof(struct rxe_mc_elem, elem), }, }; -static inline const char *pool_name(struct rxe_pool *pool) -{ - return rxe_type_info[pool->type].name; -} - static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) { int err = 0; @@ -130,35 +126,36 @@ int rxe_pool_init( enum rxe_elem_type type, unsigned int max_elem) { + const struct rxe_type_info *info = &rxe_type_info[type]; int err = 0; - size_t size = rxe_type_info[type].size; memset(pool, 0, sizeof(*pool)); pool->rxe = rxe; + pool->name = info->name; pool->type = type; pool->max_elem = max_elem; - pool->elem_size = ALIGN(size, RXE_POOL_ALIGN); - pool->flags = rxe_type_info[type].flags; - pool->index.tree = RB_ROOT; - pool->key.tree = RB_ROOT; - pool->cleanup = rxe_type_info[type].cleanup; + pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN); + pool->elem_offset = info->elem_offset; + pool->flags = info->flags; + pool->cleanup = info->cleanup; atomic_set(&pool->num_elem, 0); rwlock_init(&pool->pool_lock); - if (rxe_type_info[type].flags & RXE_POOL_INDEX) { - err = rxe_pool_init_index(pool, - rxe_type_info[type].max_index, - rxe_type_info[type].min_index); + if (pool->flags & RXE_POOL_INDEX) { + pool->index.tree = RB_ROOT; + err = rxe_pool_init_index(pool, info->max_index, + info->min_index); if (err) goto out; } - if (rxe_type_info[type].flags & RXE_POOL_KEY) { - pool->key.key_offset = rxe_type_info[type].key_offset; - pool->key.key_size = rxe_type_info[type].key_size; + if (pool->flags & RXE_POOL_KEY) { + pool->key.tree = RB_ROOT; + pool->key.key_offset = info->key_offset; + pool->key.key_size = info->key_size; } out: 
@@ -169,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
 {
 	if (atomic_read(&pool->num_elem) > 0)
 		pr_warn("%s pool destroyed with unfree'd elem\n",
-			pool_name(pool));
+			pool->name);
 
-	bitmap_free(pool->index.table);
+	if (pool->flags & RXE_POOL_INDEX)
+		bitmap_free(pool->index.table);
 }
 
 static u32 alloc_index(struct rxe_pool *pool)
@@ -189,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool)
 	return index + pool->index.min_index;
 }
 
-static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
 {
 	struct rb_node **link = &pool->index.tree.rb_node;
 	struct rb_node *parent = NULL;
-	struct rxe_pool_entry *elem;
+	struct rxe_pool_elem *elem;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, index_node);
+		elem = rb_entry(parent, struct rxe_pool_elem, index_node);
 
 		if (elem->index == new->index) {
 			pr_warn("element already exists!\n");
@@ -216,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
 	return 0;
 }
 
-static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
 {
 	struct rb_node **link = &pool->key.tree.rb_node;
 	struct rb_node *parent = NULL;
-	struct rxe_pool_entry *elem;
+	struct rxe_pool_elem *elem;
 	int cmp;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, key_node);
+		elem = rb_entry(parent, struct rxe_pool_elem, key_node);
 
 		cmp = memcmp((u8 *)elem + pool->key.key_offset,
-			     (u8 *)new + pool->key.key_offset, pool->key.key_size);
+			     (u8 *)new + pool->key.key_offset,
+			     pool->key.key_size);
 
 		if (cmp == 0) {
 			pr_warn("key already exists!\n");
@@ -247,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
 	return 0;
 }
 
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
 {
 	struct rxe_pool *pool = elem->pool;
 	int err;
@@ -258,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
 	return err;
 }
 
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 	int err;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	err = __rxe_add_key_locked(elem, key);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 
 	return err;
 }
 
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
 
 	rb_erase(&elem->key_node, &pool->key.tree);
 }
 
-void __rxe_drop_key(struct rxe_pool_entry *elem)
+void __rxe_drop_key(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	__rxe_drop_key_locked(elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 }
 
-int __rxe_add_index_locked(struct rxe_pool_entry *elem)
+int __rxe_add_index_locked(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
 	int err;
@@ -299,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem)
 	return err;
 }
 
-int __rxe_add_index(struct rxe_pool_entry *elem)
+int __rxe_add_index(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 	int err;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	err = __rxe_add_index_locked(elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 
 	return err;
 }
 
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
 
@@ -320,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
 	rb_erase(&elem->index_node, &pool->index.tree);
 }
 
-void __rxe_drop_index(struct rxe_pool_entry *elem)
+void __rxe_drop_index(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	__rxe_drop_index_locked(elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 }
 
 void *rxe_alloc_locked(struct rxe_pool *pool)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	obj = kzalloc(info->size, GFP_ATOMIC);
+	obj = kzalloc(pool->elem_size, GFP_ATOMIC);
 	if (!obj)
 		goto out_cnt;
 
-	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
 
 	elem->pool = pool;
+	elem->obj = obj;
 	kref_init(&elem->ref_cnt);
 
 	return obj;
@@ -357,20 +352,20 @@ out_cnt:
 
 void *rxe_alloc(struct rxe_pool *pool)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	obj = kzalloc(info->size, GFP_KERNEL);
+	obj = kzalloc(pool->elem_size, GFP_KERNEL);
 	if (!obj)
 		goto out_cnt;
 
-	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
 
 	elem->pool = pool;
+	elem->obj = obj;
 	kref_init(&elem->ref_cnt);
 
 	return obj;
@@ -380,12 +375,13 @@ out_cnt:
 	return NULL;
 }
 
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 {
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
 	elem->pool = pool;
+	elem->obj = (u8 *)elem - pool->elem_offset;
 	kref_init(&elem->ref_cnt);
 
 	return 0;
@@ -397,17 +393,16 @@ out_cnt:
 
 void rxe_elem_release(struct kref *kref)
 {
-	struct rxe_pool_entry *elem =
-		container_of(kref, struct rxe_pool_entry, ref_cnt);
+	struct rxe_pool_elem *elem =
+		container_of(kref, struct rxe_pool_elem, ref_cnt);
 	struct rxe_pool *pool = elem->pool;
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
-	u8 *obj;
+	void *obj;
 
 	if (pool->cleanup)
 		pool->cleanup(elem);
 
 	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
-		obj = (u8 *)elem - info->elem_offset;
+		obj = elem->obj;
 		kfree(obj);
 	}
 
@@ -416,15 +411,14 @@ void rxe_elem_release(struct kref *kref)
 
 void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
 	struct rb_node *node;
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 
 	node = pool->index.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, index_node);
+		elem = rb_entry(node, struct rxe_pool_elem, index_node);
 
 		if (elem->index > index)
 			node = node->rb_left;
@@ -436,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
 
 	if (node) {
 		kref_get(&elem->ref_cnt);
-		obj = (u8 *)elem - info->elem_offset;
+		obj = elem->obj;
 	} else {
 		obj = NULL;
 	}
@@ -446,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
 
 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 {
-	u8 *obj;
-	unsigned long flags;
+	void *obj;
 
-	read_lock_irqsave(&pool->pool_lock, flags);
+	read_lock_bh(&pool->pool_lock);
 	obj = rxe_pool_get_index_locked(pool, index);
-	read_unlock_irqrestore(&pool->pool_lock, flags);
+	read_unlock_bh(&pool->pool_lock);
 
 	return obj;
 }
 
 void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
 	struct rb_node *node;
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 	int cmp;
 
 	node = pool->key.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, key_node);
+		elem = rb_entry(node, struct rxe_pool_elem, key_node);
 
 		cmp = memcmp((u8 *)elem + pool->key.key_offset,
 			     key, pool->key.key_size);
@@ -482,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
 
 	if (node) {
 		kref_get(&elem->ref_cnt);
-		obj = (u8 *)elem - info->elem_offset;
+		obj = elem->obj;
 	} else {
 		obj = NULL;
 	}
@@ -492,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
 
 void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 {
-	u8 *obj;
-	unsigned long flags;
+	void *obj;
 
-	read_lock_irqsave(&pool->pool_lock, flags);
+	read_lock_bh(&pool->pool_lock);
 	obj = rxe_pool_get_key_locked(pool, key);
-	read_unlock_irqrestore(&pool->pool_lock, flags);
+	read_unlock_bh(&pool->pool_lock);
 
 	return obj;
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 8ecd9f870aea..214279310f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -7,9 +7,6 @@
 #ifndef RXE_POOL_H
 #define RXE_POOL_H
 
-#define RXE_POOL_ALIGN		(16)
-#define RXE_POOL_CACHE_FLAGS	(0)
-
 enum rxe_pool_flags {
 	RXE_POOL_INDEX		= BIT(1),
 	RXE_POOL_KEY		= BIT(2),
@@ -30,10 +27,9 @@ enum rxe_elem_type {
 	RXE_NUM_TYPES,		/* keep me last */
 };
 
-struct rxe_pool_entry;
-
-struct rxe_pool_entry {
+struct rxe_pool_elem {
 	struct rxe_pool		*pool;
+	void			*obj;
 	struct kref		ref_cnt;
 	struct list_head	list;
 
@@ -47,14 +43,16 @@ struct rxe_pool_entry {
 
 struct rxe_pool {
 	struct rxe_dev		*rxe;
+	const char		*name;
 	rwlock_t		pool_lock; /* protects pool add/del/search */
-	size_t			elem_size;
-	void			(*cleanup)(struct rxe_pool_entry *obj);
+	void			(*cleanup)(struct rxe_pool_elem *obj);
 	enum rxe_pool_flags	flags;
 	enum rxe_elem_type	type;
 
 	unsigned int		max_elem;
 	atomic_t		num_elem;
+	size_t			elem_size;
+	size_t			elem_offset;
 
 	/* only used if indexed */
 	struct {
@@ -89,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool);
 void *rxe_alloc(struct rxe_pool *pool);
 
 /* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
 
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
 
 /* assign an index to an indexed object and insert object into
  * pool's rb tree holding and not holding the pool_lock
 */
-int __rxe_add_index_locked(struct rxe_pool_entry *elem);
+int __rxe_add_index_locked(struct rxe_pool_elem *elem);
 
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
 
-int __rxe_add_index(struct rxe_pool_entry *elem);
+int __rxe_add_index(struct rxe_pool_elem *elem);
 
-#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
 
 /* drop an index and remove object from rb tree
  * holding and not holding the pool_lock
 */
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
 
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
 
-void __rxe_drop_index(struct rxe_pool_entry *elem);
+void __rxe_drop_index(struct rxe_pool_elem *elem);
 
-#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
 
 /* assign a key to a keyed object and insert object into
  * pool's rb tree holding and not holding pool_lock
 */
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key);
 
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key)
 
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key);
 
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key)
 
 /* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem);
 
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem)
 
-void __rxe_drop_key(struct rxe_pool_entry *elem);
+void __rxe_drop_key(struct rxe_pool_elem *elem);
 
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem)
 
 /* lookup an indexed object from index holding and not holding the pool_lock.
 * takes a reference on object
@@ -153,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
 void rxe_elem_release(struct kref *kref);
 
 /* take a reference on an object */
-#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt)
 
 /* drop a reference on an object */
-#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release)
 
 #endif /* RXE_POOL_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 54b8711321c1..afe11f475b8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->attr.path_mtu = 1;
 	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
 
-	qpn = qp->pelem.index;
+	qpn = qp->elem.index;
 	port = &rxe->port;
 
 	switch (init->qp_type) {
@@ -832,9 +832,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 }
 
 /* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+void rxe_qp_cleanup(struct rxe_pool_elem *elem)
 {
-	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
 
 	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 6e6e023c1b45..a1b283dd2d4c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -151,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 	struct rxe_queue *new_q;
 	unsigned int num_elem = *num_elem_p;
 	int err;
-	unsigned long flags = 0, flags1;
 
 	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
 	if (!new_q)
@@ -165,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 		goto err1;
 	}
 
-	spin_lock_irqsave(consumer_lock, flags1);
+	spin_lock_bh(consumer_lock);
 
 	if (producer_lock) {
-		spin_lock_irqsave(producer_lock, flags);
+		spin_lock_bh(producer_lock);
 		err = resize_finish(q, new_q, num_elem);
-		spin_unlock_irqrestore(producer_lock, flags);
+		spin_unlock_bh(producer_lock);
 	} else {
 		err = resize_finish(q, new_q, num_elem);
 	}
 
-	spin_unlock_irqrestore(consumer_lock, flags1);
+	spin_unlock_bh(consumer_lock);
 
 	rxe_queue_cleanup(new_q);	/* new/old dep on err */
 	if (err)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 0c9d2af15f3d..5eb89052dd66 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -110,7 +110,6 @@ void rnr_nak_timer(struct timer_list *t)
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 {
 	struct rxe_send_wqe *wqe;
-	unsigned long flags;
 	struct rxe_queue *q = qp->sq.queue;
 	unsigned int index = qp->req.wqe_index;
 	unsigned int cons;
@@ -124,25 +123,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 		/* check to see if we are drained;
 		 * state_lock used by requester and completer
 		 */
-		spin_lock_irqsave(&qp->state_lock, flags);
+		spin_lock_bh(&qp->state_lock);
 		do {
 			if (qp->req.state != QP_STATE_DRAIN) {
 				/* comp just finished */
-				spin_unlock_irqrestore(&qp->state_lock,
-						       flags);
+				spin_unlock_bh(&qp->state_lock);
 				break;
 			}
 
 			if (wqe && ((index != cons) ||
				(wqe->state != wqe_state_posted))) {
 				/* comp not done yet */
-				spin_unlock_irqrestore(&qp->state_lock,
-						       flags);
+				spin_unlock_bh(&qp->state_lock);
 				break;
 			}
 
 			qp->req.state = QP_STATE_DRAINED;
-			spin_unlock_irqrestore(&qp->state_lock, flags);
+			spin_unlock_bh(&qp->state_lock);
 
 			if (qp->ibqp.event_handler) {
 				struct ib_event ev;
@@ -372,7 +369,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	int pad = (-payload) & 0x3;
 	int paylen;
 	int solicited;
-	u16 pkey;
 	u32 qp_num;
 	int ack_req;
 
@@ -404,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
 			(RXE_WRITE_MASK | RXE_IMMDT_MASK));
 
-	pkey = IB_DEFAULT_PKEY_FULL;
-
 	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
 					 qp->attr.dest_qp_num;
 
@@ -414,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	if (ack_req)
 		qp->req.noack_pkts = 0;
 
-	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
 		 ack_req, pkt->psn);
 
 	/* init optional headers */
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index eb1c4c3b3a78..0c0721f04357 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -83,7 +83,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	srq->ibsrq.event_handler = init->event_handler;
 	srq->ibsrq.srq_context = init->srq_context;
 	srq->limit = init->attr.srq_limit;
-	srq->srq_num = srq->pelem.index;
+	srq->srq_num = srq->elem.index;
 	srq->rq.max_wr = init->attr.max_wr;
 	srq->rq.max_sge = init->attr.max_sge;
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6951fdcb31bf..0c4db5bb17d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t)
 {
 	int cont;
 	int ret;
-	unsigned long flags;
 	struct rxe_task *task = from_tasklet(task, t, tasklet);
 
-	spin_lock_irqsave(&task->state_lock, flags);
+	spin_lock_bh(&task->state_lock);
 	switch (task->state) {
 	case TASK_STATE_START:
 		task->state = TASK_STATE_BUSY;
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 		break;
 
 	case TASK_STATE_BUSY:
 		task->state = TASK_STATE_ARMED;
 		fallthrough;
	case TASK_STATE_ARMED:
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 		return;
 
 	default:
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 		pr_warn("%s failed with bad state %d\n", __func__, task->state);
 		return;
 	}
@@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t)
 		cont = 0;
 		ret = task->func(task->arg);
 
-		spin_lock_irqsave(&task->state_lock, flags);
+		spin_lock_bh(&task->state_lock);
 		switch (task->state) {
 		case TASK_STATE_BUSY:
 			if (ret)
@@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t)
 			pr_warn("%s failed with bad state %d\n", __func__,
 				task->state);
 		}
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 	} while (cont);
 
 	task->ret = ret;
@@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task,
 
 void rxe_cleanup_task(struct rxe_task *task)
 {
-	unsigned long flags;
 	bool idle;
 
 	/*
@@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task)
 	task->destroyed = true;
 
 	do {
-		spin_lock_irqsave(&task->state_lock, flags);
+		spin_lock_bh(&task->state_lock);
 		idle = (task->state == TASK_STATE_START);
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 	} while (!idle);
 
 	tasklet_kill(&task->tasklet);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0aa0d7e52773..07ca169110bf 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -182,7 +182,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
 
 	/* create index > 0 */
 	rxe_add_index(ah);
-	ah->ah_num = ah->pelem.index;
+	ah->ah_num = ah->elem.index;
 
 	if (uresp) {
 		/* only if new user provider */
@@ -383,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			     const struct ib_recv_wr **bad_wr)
 {
 	int err = 0;
-	unsigned long flags;
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 
-	spin_lock_irqsave(&srq->rq.producer_lock, flags);
+	spin_lock_bh(&srq->rq.producer_lock);
 
 	while (wr) {
 		err = post_one_recv(&srq->rq, wr);
@@ -395,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		wr = wr->next;
 	}
 
-	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+	spin_unlock_bh(&srq->rq.producer_lock);
 
 	if (err)
 		*bad_wr = wr;
@@ -634,19 +633,18 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	int err;
 	struct rxe_sq *sq = &qp->sq;
 	struct rxe_send_wqe *send_wqe;
-	unsigned long flags;
 	int full;
 
 	err = validate_send_wr(qp, ibwr, mask, length);
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&qp->sq.sq_lock, flags);
+	spin_lock_bh(&qp->sq.sq_lock);
 
 	full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
 
 	if (unlikely(full)) {
-		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+		spin_unlock_bh(&qp->sq.sq_lock);
 		return -ENOMEM;
 	}
 
@@ -655,7 +653,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 
 	queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
 
-	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+	spin_unlock_bh(&qp->sq.sq_lock);
 
 	return 0;
 }
@@ -735,7 +733,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	int err = 0;
 	struct rxe_qp *qp = to_rqp(ibqp);
 	struct rxe_rq *rq = &qp->rq;
-	unsigned long flags;
 
 	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
 		*bad_wr = wr;
@@ -749,7 +746,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		goto err1;
 	}
 
-	spin_lock_irqsave(&rq->producer_lock, flags);
+	spin_lock_bh(&rq->producer_lock);
 
 	while (wr) {
 		err = post_one_recv(rq, wr);
@@ -760,7 +757,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		wr = wr->next;
 	}
 
-	spin_unlock_irqrestore(&rq->producer_lock, flags);
+	spin_unlock_bh(&rq->producer_lock);
 
 	if (qp->resp.state == QP_STATE_ERROR)
 		rxe_run_task(&qp->resp.task, 1);
@@ -841,9 +838,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	int i;
 	struct rxe_cq *cq = to_rcq(ibcq);
 	struct rxe_cqe *cqe;
-	unsigned long flags;
 
-	spin_lock_irqsave(&cq->cq_lock, flags);
+	spin_lock_bh(&cq->cq_lock);
 	for (i = 0; i < num_entries; i++) {
 		cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
 		if (!cqe)
@@ -852,7 +848,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
 		queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
 	}
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	spin_unlock_bh(&cq->cq_lock);
 
 	return i;
 }
@@ -870,11 +866,10 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
-	unsigned long irq_flags;
 	int ret = 0;
 	int empty;
 
-	spin_lock_irqsave(&cq->cq_lock, irq_flags);
+	spin_lock_bh(&cq->cq_lock);
 	if (cq->notify != IB_CQ_NEXT_COMP)
 		cq->notify = flags & IB_CQ_SOLICITED_MASK;
 
@@ -883,7 +878,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
 		ret = 1;
 
-	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+	spin_unlock_bh(&cq->cq_lock);
 
 	return ret;
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 35e041450090..caf1ce118765 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,17 +35,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
 
 struct rxe_ucontext {
 	struct ib_ucontext ibuc;
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 };
 
 struct rxe_pd {
 	struct ib_pd ibpd;
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 };
 
 struct rxe_ah {
 	struct ib_ah ibah;
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	struct rxe_av av;
 	bool is_user;
 	int ah_num;
@@ -60,7 +60,7 @@ struct rxe_cqe {
 
 struct rxe_cq {
 	struct ib_cq ibcq;
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	struct rxe_queue *queue;
 	spinlock_t cq_lock;
 	u8 notify;
@@ -95,7 +95,7 @@ struct rxe_rq {
 
 struct rxe_srq {
 	struct ib_srq ibsrq;
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	struct rxe_pd *pd;
 	struct rxe_rq rq;
 	u32 srq_num;
@@ -209,7 +209,7 @@ struct rxe_resp_info {
 
 struct rxe_qp {
 	struct ib_qp ibqp;
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	struct ib_qp_attr attr;
 	unsigned int valid;
 	unsigned int mtu;
@@ -309,7 +309,7 @@ static inline int rkey_is_mw(u32 rkey)
 }
 
 struct rxe_mr {
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	struct ib_mr ibmr;
 
 	struct ib_umem *umem;
@@ -342,7 +342,7 @@ enum rxe_mw_state {
 
 struct rxe_mw {
 	struct ib_mw ibmw;
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	spinlock_t lock;
 	enum rxe_mw_state state;
 	struct rxe_qp *qp; /* Type 2 only */
@@ -354,7 +354,7 @@ struct rxe_mw {
 };
 
 struct rxe_mc_grp {
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	spinlock_t mcg_lock; /* guard group */
 	struct rxe_dev *rxe;
 	struct list_head qp_list;
@@ -365,7 +365,7 @@ struct rxe_mc_grp {
 };
 
 struct rxe_mc_elem {
-	struct rxe_pool_entry pelem;
+	struct rxe_pool_elem elem;
 	struct list_head qp_list;
 	struct list_head grp_list;
 	struct rxe_qp *qp;
@@ -484,6 +484,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
 
 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
 
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *elem);
 
 #endif /* RXE_VERBS_H */
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 1b36350601fa..a3dd2cb6d5c9 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -8,6 +8,7 @@
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/xarray.h>
+#include <net/addrconf.h>
 
 #include <rdma/iw_cm.h>
 #include <rdma/ib_verbs.h>
@@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
 	attr->vendor_id = SIW_VENDOR_ID;
 	attr->vendor_part_id = sdev->vendor_part_id;
 
-	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);
+	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+			    sdev->netdev->dev_addr);
 
 	return 0;
 }
@@ -660,7 +662,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 		kbuf += core_sge->length;
 		core_sge++;
 	}
-	sqe->sge[0].length = bytes > 0 ? bytes : 0;
+	sqe->sge[0].length = max(bytes, 0);
 	sqe->num_sge = bytes > 0 ? 1 : 0;
 
 	return bytes;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 15c0077dd27e..e39709dee179 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -867,7 +867,7 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
 	struct rtrs_clt_sess *min_path = NULL;
 	struct rtrs_clt *clt = it->clt;
 	struct rtrs_clt_sess *sess;
-	ktime_t min_latency = INT_MAX;
+	ktime_t min_latency = KTIME_MAX;
 	ktime_t latency;
 
 	list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 42b177655560..f6fde06db4b4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -77,10 +77,12 @@ enum hns_roce_qp_cap_flags {
 	HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0,
 	HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1,
 	HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2,
+	HNS_ROCE_QP_CAP_DIRECT_WQE = 1 << 5,
 };
 
 struct hns_roce_ib_create_qp_resp {
 	__aligned_u64 cap_flags;
+	__aligned_u64 dwqe_mmap_key;
 };
 
 struct hns_roce_ib_alloc_ucontext_resp {
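
Editorial note: the rxe hunks above all revolve around one idiom — every pooled object embeds a generic struct rxe_pool_elem, the rework stores a back-pointer in elem->obj, and container_of() recovers the enclosing object from the embedded member. The following is a minimal userspace sketch of that pattern, not part of the patch; the names my_pool_elem and my_qp are hypothetical stand-ins, and the container_of() macro is re-defined locally so the example compiles outside the kernel.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic pool element embedded in every object (hypothetical names). */
struct my_pool_elem {
	int index;
	void *obj;	/* back-pointer, like the elem->obj field the rework adds */
};

struct my_qp {
	int qp_num;
	struct my_pool_elem elem;	/* embedded member, like rxe_qp.elem */
};

int main(void)
{
	struct my_qp qp = { .qp_num = 7 };

	/* What __rxe_add_to_pool() now does: record the back-pointer
	 * by subtracting the member offset from the element address.
	 */
	qp.elem.obj = (char *)&qp.elem - offsetof(struct my_qp, elem);

	/* Recover the container arithmetically, or via the stored pointer. */
	struct my_qp *a = container_of(&qp.elem, struct my_qp, elem);
	struct my_qp *b = qp.elem.obj;

	printf("%d %d\n", a->qp_num, b->qp_num);	/* prints "7 7" */
	return 0;
}

The back-pointer trades a few bytes per element for lookups that no longer need a per-type elem_offset table, which is what lets rxe_pool_get_index_locked() and rxe_elem_release() in the patch return elem->obj directly.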