Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c  101
1 file changed, 52 insertions, 49 deletions
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ea8bfc3e2d8d..6fa0a83c19de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -147,7 +147,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
break;
}
mr->order = ent->order;
- mr->allocated_from_cache = 1;
+ mr->allocated_from_cache = true;
mr->dev = dev;
MLX5_SET(mkc, mkc, free, 1);
@@ -661,12 +661,21 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET64(mkc, mkc, start_addr, start_addr);
@@ -737,10 +746,9 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
return MLX5_MAX_UMR_SHIFT;
}
-static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
- u64 start, u64 length, int access_flags,
- struct ib_umem **umem, int *npages, int *page_shift,
- int *ncont, int *order)
+static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
+ int access_flags, struct ib_umem **umem, int *npages,
+ int *page_shift, int *ncont, int *order)
{
struct ib_umem *u;
@@ -749,7 +757,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (access_flags & IB_ACCESS_ON_DEMAND) {
struct ib_umem_odp *odp;
- odp = ib_umem_odp_get(udata, start, length, access_flags,
+ odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
&mlx5_mn_ops);
if (IS_ERR(odp)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
@@ -765,7 +773,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags);
+ u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -868,36 +876,6 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(
return mr;
}
-static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
- void *xlt, int page_shift, size_t size,
- int flags)
-{
- struct mlx5_ib_dev *dev = mr->dev;
- struct ib_umem *umem = mr->umem;
-
- if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
- if (!umr_can_use_indirect_mkey(dev))
- return -EPERM;
- mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
- return npages;
- }
-
- npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
-
- if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
- __mlx5_ib_populate_pas(dev, umem, page_shift,
- idx, npages, xlt,
- MLX5_IB_MTT_PRESENT);
- /* Clear padding after the pages
- * brought from the umem.
- */
- memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
- size - npages * sizeof(struct mlx5_mtt));
- }
-
- return npages;
-}
-
#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
@@ -921,6 +899,7 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
size_t pages_mapped = 0;
size_t pages_to_map = 0;
size_t pages_iter = 0;
+ size_t size_to_map = 0;
gfp_t gfp;
bool use_emergency_page = false;
@@ -967,6 +946,15 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
goto free_xlt;
}
+ if (mr->umem->is_odp) {
+ if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+
+ pages_to_map = min_t(size_t, pages_to_map, max_pages);
+ }
+ }
+
sg.addr = dma;
sg.lkey = dev->umrc.pd->local_dma_lkey;
@@ -989,14 +977,22 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
pages_mapped < pages_to_map && !err;
pages_mapped += pages_iter, idx += pages_iter) {
npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
+ size_to_map = npages * desc_size;
dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
- npages = populate_xlt(mr, idx, npages, xlt,
- page_shift, size, flags);
-
+ if (mr->umem->is_odp) {
+ mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ } else {
+ __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
+ npages, xlt,
+ MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the pages
+ * brought from the umem.
+ */
+ memset(xlt + size_to_map, 0, size - size_to_map);
+ }
dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
- sg.length = ALIGN(npages * desc_size,
- MLX5_UMR_MTT_ALIGNMENT);
+ sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
if (pages_mapped + pages_iter >= pages_to_map) {
if (flags & MLX5_IB_UPD_XLT_ENABLE)
@@ -1075,6 +1071,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, free, !populate);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -1247,6 +1249,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
length == U64_MAX) {
+ if (virt_addr != start)
+ return ERR_PTR(-EINVAL);
if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
@@ -1257,13 +1261,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
}
- err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
+ err = mr_umem_get(dev, start, length, access_flags, &umem,
&npages, &page_shift, &ncont, &order);
if (err < 0)
return ERR_PTR(err);
- use_umr = mlx5_ib_can_use_umr(dev, true);
+ use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);
if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1424,14 +1428,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
mr->umem = NULL;
- err = mr_umem_get(dev, udata, addr, len, access_flags,
- &mr->umem, &npages, &page_shift, &ncont,
- &order);
+ err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
+ &npages, &page_shift, &ncont, &order);
if (err)
goto err;
}
- if (!mlx5_ib_can_use_umr(dev, true) ||
+ if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
(flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
/*
* UMR can't be used - MKey needs to be replaced.
@@ -1452,7 +1455,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- mr->allocated_from_cache = 0;
+ mr->allocated_from_cache = false;
} else {
/*
* Send a UMR WQE