author      Christoph Hellwig <hch@lst.de>  2015-10-08 09:16:33 +0100
committer   Christoph Hellwig <hch@lst.de>  2015-10-08 11:09:10 +0100
commit      e622f2f4ad2142d2a613a57fb85f8cf737935ef5 (patch)
tree        19fa458bcaacf3f8b2f5e40676f748afc3df1e84 /drivers/infiniband/hw/mlx5/mr.c
parent      IB/cma: Accept connection without a valid netdev on RoCE (diff)
IB: split struct ib_send_wr
This patch splits up struct ib_send_wr so that all non-trivial verbs use their own structure which embeds struct ib_send_wr. This dramatically shrinks the size of a WR for the most common operations:

  sizeof(struct ib_send_wr) (old):    96

  sizeof(struct ib_send_wr):          48
  sizeof(struct ib_rdma_wr):          64
  sizeof(struct ib_atomic_wr):        96
  sizeof(struct ib_ud_wr):            88
  sizeof(struct ib_fast_reg_wr):      88
  sizeof(struct ib_bind_mw_wr):       96
  sizeof(struct ib_sig_handover_wr):  80

And with Sagi's pending MR rework the fast registration WR will also be down to a reasonable size:

  sizeof(struct ib_fastreg_wr):       64

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com> [srp, srpt]
Reviewed-by: Chuck Lever <chuck.lever@oracle.com> [sunrpc]
Tested-by: Haggai Eran <haggaie@mellanox.com>
Tested-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
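For readers skimming the mlx5 hunks below: the split works by embedding the generic struct ib_send_wr as the first member of each per-verb WR, with providers downcasting via container_of(). A minimal sketch of the pattern, modeled on the ib_rdma_wr entry in the size list above (only the fields implied by this commit are shown):

	/* Sketch of the per-verb WR pattern this patch introduces. */
	struct ib_rdma_wr {
		struct ib_send_wr	wr;		/* generic WR, must stay first */
		u64			remote_addr;	/* remote VA for RDMA READ/WRITE */
		u32			rkey;		/* remote memory key */
	};

	static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
	{
		/* Valid because RDMA opcodes are only ever posted via &ib_rdma_wr.wr. */
		return container_of(wr, struct ib_rdma_wr, wr);
	}

Callers fill in the specific WR and post &rdma_wr.wr through ib_post_send(); a provider that sees an RDMA opcode recovers the outer structure with rdma_wr(). The umr_wr() calls in the diff below are the same idiom applied to the mlx5-private struct mlx5_umr_wr.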
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 52
1 file changed, 27 insertions(+), 25 deletions(-)
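One helper deserves a note before the diff: umr_wr(), which replaces the old open-coded casts of &wr->wr.fast_reg. It lives in the driver's mlx5_ib.h, outside this file's diffstat; the sketch below is a reconstruction from the wr.npages / wr.page_shift / wr.mkey / wr.target.offset uses visible in the hunks, with the remaining UMR fields elided:

	/* Sketch of the mlx5_ib.h side (not part of this file's diff). */
	struct mlx5_umr_wr {
		struct ib_send_wr	wr;		/* embedded generic WR, first member */
		union {
			u64		virt_addr;	/* used when registering */
			u64		offset;		/* used by mlx5_ib_update_mtt() */
		} target;
		unsigned int		page_shift;
		unsigned int		npages;
		u32			mkey;
		/* ... access flags, length, pd, etc. elided ... */
	};

	static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
	{
		return container_of(wr, struct mlx5_umr_wr, wr);
	}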
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 54a15b5d336d..b30d4ae0fb61 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -687,7 +687,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+ struct mlx5_umr_wr *umrwr = umr_wr(wr);
sg->addr = dma;
sg->length = ALIGN(sizeof(u64) * n, 64);
@@ -715,7 +715,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
struct ib_send_wr *wr, u32 key)
{
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+ struct mlx5_umr_wr *umrwr = umr_wr(wr);
wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
wr->opcode = MLX5_IB_WR_UMR;
@@ -752,7 +752,8 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct device *ddev = dev->ib_dev.dma_device;
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
- struct ib_send_wr wr, *bad;
+ struct mlx5_umr_wr umrwr;
+ struct ib_send_wr *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
int size;
@@ -798,14 +799,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
goto free_pas;
}
- memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
- virt_addr, len, access_flags);
+ memset(&umrwr, 0, sizeof(umrwr));
+ umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+ prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+ page_shift, virt_addr, len, access_flags);
mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
- err = ib_post_send(umrc->qp, &wr, &bad);
+ err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
if (err) {
mlx5_ib_warn(dev, "post send failed, err %d\n", err);
goto unmap_dma;
@@ -851,8 +852,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
int size;
__be64 *pas;
dma_addr_t dma;
- struct ib_send_wr wr, *bad;
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
+ struct ib_send_wr *bad;
+ struct mlx5_umr_wr wr;
struct ib_sge sg;
int err = 0;
const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
@@ -917,26 +918,26 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)&umr_context;
+ wr.wr.wr_id = (u64)(unsigned long)&umr_context;
sg.addr = dma;
sg.length = ALIGN(npages * sizeof(u64),
MLX5_UMR_MTT_ALIGNMENT);
sg.lkey = dev->umrc.pd->local_dma_lkey;
- wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+ wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
MLX5_IB_SEND_UMR_UPDATE_MTT;
- wr.sg_list = &sg;
- wr.num_sge = 1;
- wr.opcode = MLX5_IB_WR_UMR;
- umrwr->npages = sg.length / sizeof(u64);
- umrwr->page_shift = PAGE_SHIFT;
- umrwr->mkey = mr->mmr.key;
- umrwr->target.offset = start_page_index;
+ wr.wr.sg_list = &sg;
+ wr.wr.num_sge = 1;
+ wr.wr.opcode = MLX5_IB_WR_UMR;
+ wr.npages = sg.length / sizeof(u64);
+ wr.page_shift = PAGE_SHIFT;
+ wr.mkey = mr->mmr.key;
+ wr.target.offset = start_page_index;
mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
- err = ib_post_send(umrc->qp, &wr, &bad);
+ err = ib_post_send(umrc->qp, &wr.wr, &bad);
if (err) {
mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
} else {
@@ -1122,16 +1123,17 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
- struct ib_send_wr wr, *bad;
+ struct mlx5_umr_wr umrwr;
+ struct ib_send_wr *bad;
int err;
- memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
+ memset(&umrwr.wr, 0, sizeof(umrwr));
+ umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+ prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
- err = ib_post_send(umrc->qp, &wr, &bad);
+ err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
if (err) {
up(&umrc->sem);
mlx5_ib_dbg(dev, "err %d\n", err);