author	Michael Guralnik <michaelgur@nvidia.com>	2023-01-26 00:28:04 +0200
committer	Jason Gunthorpe <jgg@nvidia.com>	2023-01-27 13:04:09 -0400
commit	b9584517832858a0f78d6851d09b697a829514cd (patch)
tree	edbd61efcb53909a45bb403674a286f5391148bd /drivers/infiniband/hw/mlx5/odp.c
parent	RDMA/mlx5: Remove implicit ODP cache entry (diff)
download	linux-b9584517832858a0f78d6851d09b697a829514cd.tar.xz
	linux-b9584517832858a0f78d6851d09b697a829514cd.zip
RDMA/mlx5: Change the cache structure to an RB-tree
Currently, the cache structure is a static linear array. Therefore, its size is limited to the number of entries in it and it is not expandable. The entries are dedicated to mkeys of size 2^x with no access_flags. Mkeys with different properties are not cacheable.

In this patch, we change the cache structure to an RB-tree. This will allow extending the cache to support more entries with different mkey properties.

Link: https://lore.kernel.org/r/20230125222807.6921-4-michaelgur@nvidia.com
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
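For illustration only, the sketch below (not taken from the driver; the names cache_key, cache_ent, cache_key_cmp and cache_ent_find are assumptions) shows how a kernel RB-tree keyed by mkey properties, here order plus access_flags, could replace a fixed array indexed by order alone:

	/*
	 * Illustrative sketch: an RB-tree of cache entries keyed by the
	 * properties of the mkeys they hold. All names here are assumed,
	 * not the driver's actual structures.
	 */
	#include <linux/rbtree.h>

	struct cache_key {
		unsigned int order;
		unsigned int access_flags;
	};

	struct cache_ent {
		struct rb_node node;
		struct cache_key key;
		/* per-entry free mkey list, limits, etc. would live here */
	};

	static int cache_key_cmp(const struct cache_key *a,
				 const struct cache_key *b)
	{
		if (a->order != b->order)
			return a->order < b->order ? -1 : 1;
		if (a->access_flags != b->access_flags)
			return a->access_flags < b->access_flags ? -1 : 1;
		return 0;
	}

	static struct cache_ent *cache_ent_find(struct rb_root *root,
						const struct cache_key *key)
	{
		struct rb_node *node = root->rb_node;

		while (node) {
			struct cache_ent *ent =
				rb_entry(node, struct cache_ent, node);
			int cmp = cache_key_cmp(key, &ent->key);

			if (cmp < 0)
				node = node->rb_left;
			else if (cmp > 0)
				node = node->rb_right;
			else
				return ent;
		}
		return NULL;
	}

With a composite key like this, entries for mkeys with differing access_flags can coexist in the same cache rather than being limited to a fixed set of 2^x-sized, flag-less entries.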
Diffstat (limited to 'drivers/infiniband/hw/mlx5/odp.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/odp.c	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 72044f8ec883..71c3c611e10a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -419,8 +419,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
 		return ERR_CAST(odp);
 
 	BUILD_BUG_ON(order > MKEY_CACHE_LAST_STD_ENTRY);
-	mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[order],
-				 imr->access_flags);
+	mr = mlx5_mr_cache_alloc_order(dev, order, imr->access_flags);
 	if (IS_ERR(mr)) {
 		ib_umem_odp_release(odp);
 		return mr;
@@ -494,9 +493,8 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	if (IS_ERR(umem_odp))
 		return ERR_CAST(umem_odp);
 
-	imr = mlx5_mr_cache_alloc(dev,
-				  &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
-				  access_flags);
+	imr = mlx5_mr_cache_alloc_order(dev, MLX5_IMR_KSM_CACHE_ENTRY,
+					access_flags);
 	if (IS_ERR(imr)) {
 		ib_umem_odp_release(umem_odp);
 		return imr;
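Both hunks drop the direct dev->cache.ent[order] indexing in favor of the new mlx5_mr_cache_alloc_order() helper, which resolves the requested order to a cache entry internally. As a rough sketch only (the lookup helper name and its internals below are assumptions based on this diff, not the patch's actual implementation), such a wrapper could take the shape:

	struct mlx5_ib_mr *mlx5_mr_cache_alloc_order(struct mlx5_ib_dev *dev,
						     u32 order, int access_flags)
	{
		/* Assumed helper: maps an order to its entry in the RB-tree cache. */
		struct mlx5_cache_ent *ent = mkey_cache_ent_from_order(dev, order);

		if (IS_ERR(ent))
			return ERR_CAST(ent);
		return mlx5_mr_cache_alloc(dev, ent, access_flags);
	}

Keeping the order-to-entry lookup inside the helper is what lets the callers in odp.c stay unaware of whether the cache is an array or an RB-tree.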