path: root/drivers/infiniband/hw/mlx5/odp.c
author     Aharon Landau <aharonl@nvidia.com>   2023-01-26 00:28:03 +0200
committer  Jason Gunthorpe <jgg@nvidia.com>     2023-01-27 13:04:09 -0400
commit     18b1746bddf5e7f6b2618966596d9517172a5cd7 (patch)
tree       51c73d271bdb57c1247bacdbe8167942d73446ce /drivers/infiniband/hw/mlx5/odp.c
parent     RDMA/mlx5: Don't keep umrable 'page_shift' in cache entries (diff)
download   linux-18b1746bddf5e7f6b2618966596d9517172a5cd7.tar.xz
           linux-18b1746bddf5e7f6b2618966596d9517172a5cd7.zip
RDMA/mlx5: Remove implicit ODP cache entry
The implicit ODP mkey doesn't have unique properties; it shares the same
properties as the order-18 cache entry, so there is no need to devote a
special cache entry to it.

Link: https://lore.kernel.org/r/20230125222807.6921-3-michaelgur@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
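The "order 18" above follows from the size of an implicit ODP child MR. Below is a minimal userspace sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) and the MLX5_IMR_MTT_BITS / MLX5_IMR_MTT_ENTRIES definitions from mlx5_ib.h; order_base_2() is reimplemented here as a stand-in for the kernel helper, so this is illustrative rather than kernel code.

/*
 * Standalone sketch (not kernel code): shows why the implicit ODP child
 * MR maps onto the order-18 standard cache entry when PAGE_SHIFT == 12.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define MLX5_IMR_MTT_BITS	(30 - PAGE_SHIFT)		/* assumed from mlx5_ib.h */
#define MLX5_IMR_MTT_ENTRIES	(1UL << MLX5_IMR_MTT_BITS)

/* Stand-in for the kernel's order_base_2(): smallest order with 2^order >= n */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	printf("MLX5_IMR_MTT_ENTRIES = %lu\n", MLX5_IMR_MTT_ENTRIES);	/* 262144 */
	printf("cache entry order    = %u\n",
	       order_base_2(MLX5_IMR_MTT_ENTRIES));			/* 18 */
	return 0;
}

With 4 KiB pages each child MR covers 2^18 pages, so implicit_get_child_mr() can simply index the standard cache entry of that order, which is what the first hunk below does.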
Diffstat (limited to 'drivers/infiniband/hw/mlx5/odp.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 20 +++++---------------
1 file changed, 5 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 8a78580a2a72..72044f8ec883 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -405,6 +405,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
 static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
 						unsigned long idx)
 {
+	int order = order_base_2(MLX5_IMR_MTT_ENTRIES);
 	struct mlx5_ib_dev *dev = mr_to_mdev(imr);
 	struct ib_umem_odp *odp;
 	struct mlx5_ib_mr *mr;
@@ -417,7 +418,8 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
 	if (IS_ERR(odp))
 		return ERR_CAST(odp);
 
-	mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
+	BUILD_BUG_ON(order > MKEY_CACHE_LAST_STD_ENTRY);
+	mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[order],
 				 imr->access_flags);
 	if (IS_ERR(mr)) {
 		ib_umem_odp_release(odp);
@@ -1591,20 +1593,8 @@ void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
 {
 	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 		return;
-
-	switch (ent->order - 2) {
-	case MLX5_IMR_MTT_CACHE_ENTRY:
-		ent->ndescs = MLX5_IMR_MTT_ENTRIES;
-		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
-		ent->limit = 0;
-		break;
-
-	case MLX5_IMR_KSM_CACHE_ENTRY:
-		ent->ndescs = mlx5_imr_ksm_entries;
-		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
-		ent->limit = 0;
-		break;
-	}
+	ent->ndescs = mlx5_imr_ksm_entries;
+	ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
 }
 
 static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
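For readability, here is mlx5_odp_init_mkey_cache_entry() as it reads with this patch applied, reconstructed from the hunk above (the added comment is editorial, not part of the patch): only the KSM entry used by the implicit parent MR remains special-cased, while MTT child MRs are served from the standard order-based cache entries.

void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
{
	/* Only the implicit parent (KSM) entry still needs ODP-specific
	 * setup; MTT child MRs now come from the standard cache entry of
	 * the matching order (see implicit_get_child_mr() above). */
	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
		return;
	ent->ndescs = mlx5_imr_ksm_entries;
	ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
}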