author     Maor Gottlieb <maorg@mellanox.com>    2016-10-27 16:36:37 +0300
committer  Doug Ledford <dledford@redhat.com>    2016-11-16 20:04:48 -0500
commit     578e72647ba5a77a09004606cb572f0881c34e0d (patch)
tree       f965fa1ea025d28079459a0dd704da439a8de823 /drivers/infiniband/hw/mlx5
parent     IB/mlx5: Replace numerical constant with predefined MACRO (diff)
IB/mlx5: Fix atomic cap in indirect UMR
Remove from the driver the limitation, imposed by a firmware check, that did not allow changing atomic permissions for indirect UMRs. To avoid failures on old firmware, the driver now asks for a change of atomic permissions only when atomic operations are supported.

Fixes: 968e78dd9644 ('IB/mlx5: Enhance UMR support to allow partial page table update')
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
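The pattern behind the fix can be sketched as a minimal standalone C program. This is not the driver code; the mask names and bit values below are placeholders standing in for the real MLX5_MKEY_MASK_* definitions. It only illustrates the idea: the atomic bit is OR-ed into the mkey mask when the caller reports atomic support, so old firmware is never asked to change atomic permissions.

/*
 * Minimal standalone sketch of the fix's pattern (not the driver code).
 * Mask names and bit positions are placeholders, not mlx5 definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define MASK_LEN   (1ULL << 0)   /* stands in for MLX5_MKEY_MASK_LEN  */
#define MASK_RW    (1ULL << 1)   /* stands in for MLX5_MKEY_MASK_RW   */
#define MASK_A     (1ULL << 2)   /* stands in for MLX5_MKEY_MASK_A    */
#define MASK_FREE  (1ULL << 3)   /* stands in for MLX5_MKEY_MASK_FREE */

static uint64_t get_reg_mr_mask(int atomic)
{
        uint64_t result = MASK_LEN | MASK_RW | MASK_FREE;

        /* Ask to change atomic permissions only if atomics are supported,
         * so old firmware is never asked for something it rejects. */
        if (atomic)
                result |= MASK_A;

        return result;
}

int main(void)
{
        printf("mask, no atomic support: 0x%llx\n",
               (unsigned long long)get_reg_mr_mask(0));
        printf("mask, atomic supported:  0x%llx\n",
               (unsigned long long)get_reg_mr_mask(1));
        return 0;
}
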
Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--   drivers/infiniband/hw/mlx5/qp.c   12   +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 00cffbfe6c35..5bdf20c676fe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3071,7 +3071,7 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
         umr->flags = MLX5_UMR_INLINE;
 }
 
-static __be64 get_umr_reg_mr_mask(void)
+static __be64 get_umr_reg_mr_mask(int atomic)
 {
         u64 result;
 
@@ -3084,9 +3084,11 @@ static __be64 get_umr_reg_mr_mask(void)
                  MLX5_MKEY_MASK_KEY |
                  MLX5_MKEY_MASK_RR |
                  MLX5_MKEY_MASK_RW |
-                 MLX5_MKEY_MASK_A |
                  MLX5_MKEY_MASK_FREE;
 
+        if (atomic)
+                result |= MLX5_MKEY_MASK_A;
+
         return cpu_to_be64(result);
 }
 
@@ -3147,7 +3149,7 @@ static __be64 get_umr_update_pd_mask(void)
 }
 
 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-                                struct ib_send_wr *wr)
+                                struct ib_send_wr *wr, int atomic)
 {
         struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
@@ -3172,7 +3174,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
                 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
                         umr->mkey_mask |= get_umr_update_pd_mask();
                 if (!umr->mkey_mask)
-                        umr->mkey_mask = get_umr_reg_mr_mask();
+                        umr->mkey_mask = get_umr_reg_mr_mask(atomic);
         } else {
                 umr->mkey_mask = get_umr_unreg_mr_mask();
         }
@@ -4025,7 +4027,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         }
                         qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
                         ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
-                        set_reg_umr_segment(seg, wr);
+                        set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
                         seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
                         size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
                         if (unlikely((seg == qend)))
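
At the call site the capability field is collapsed to exactly 0 or 1 with `!!` before being passed down as an int flag. A hedged sketch of that idiom follows; `hca_caps` and `atomic_cap` are hypothetical names used only for illustration, not the real structures behind MLX5_CAP_GEN().

/*
 * Illustrative sketch of the `!!` normalization idiom (hypothetical names,
 * not mlx5 code): any non-zero capability value becomes 1.
 */
#include <stdio.h>

struct hca_caps {
        unsigned int atomic_cap;        /* non-zero if atomics are supported */
};

static void post_umr(int atomic)
{
        printf("requesting atomic permission change: %s\n",
               atomic ? "yes" : "no");
}

int main(void)
{
        struct hca_caps caps = { .atomic_cap = 0x4 };

        /* !! turns any non-zero value into 1, so the callee sees a clean flag */
        post_umr(!!caps.atomic_cap);
        return 0;
}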