author		Bob Pearson <rpearsonhpe@gmail.com>	2022-06-12 17:34:35 -0500
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-06-30 10:56:05 -0300
commit		b54c2a25ace5aed21c9944b7605b623abd2ca99c (patch)
tree		05b370a21202443fc9e37530ece51ed75313f926 /drivers/infiniband/sw
parent		RDMA/rxe: Stop lookup of partially built objects (diff)
download	linux-dev-b54c2a25ace5aed21c9944b7605b623abd2ca99c.tar.xz
		linux-dev-b54c2a25ace5aed21c9944b7605b623abd2ca99c.zip
RDMA/rxe: Convert read side locking to rcu
Use rcu_read_lock() for protecting read side operations in rxe_pool.c.

Link: https://lore.kernel.org/r/20220612223434.31462-3-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
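As an illustrative aside, here is a minimal sketch of the read-side pattern described above, using hypothetical stand-in names (example_elem, example_get_index) rather than the actual rxe_pool definitions; the real code is in the diff below.

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Illustrative only: a simplified element standing in for struct
 * rxe_pool_elem, with the kref that guards object lifetime.
 */
struct example_elem {
	struct kref ref_cnt;
	void *obj;
};

/* Look up an object by index without taking the xarray spinlock.
 * xa_load() is safe under rcu_read_lock(), and kref_get_unless_zero()
 * refuses to take a reference on an element whose refcount has
 * already dropped to zero.
 */
static void *example_get_index(struct xarray *xa, unsigned long index)
{
	struct example_elem *elem;
	void *obj = NULL;

	rcu_read_lock();
	elem = xa_load(xa, index);
	if (elem && kref_get_unless_zero(&elem->ref_cnt))
		obj = elem->obj;	/* returned with a reference held */
	rcu_read_unlock();

	return obj;
}

This only stays safe if the writer defers freeing the element (and the object that embeds it) until after a grace period, which is what the kfree_rcu() change in the diff provides.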
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_pool.c	24
1 file changed, 8 insertions, 16 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 74eca7f33f3b..f50620f5a0a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -197,16 +197,15 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 {
 	struct rxe_pool_elem *elem;
 	struct xarray *xa = &pool->xa;
-	unsigned long flags;
 	void *obj;
 
-	xa_lock_irqsave(xa, flags);
+	rcu_read_lock();
 	elem = xa_load(xa, index);
 	if (elem && kref_get_unless_zero(&elem->ref_cnt))
 		obj = elem->obj;
 	else
 		obj = NULL;
-	xa_unlock_irqrestore(xa, flags);
+	rcu_read_unlock();
 
 	return obj;
 }
@@ -223,7 +222,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
 	struct rxe_pool *pool = elem->pool;
 	struct xarray *xa = &pool->xa;
 	static int timeout = RXE_POOL_TIMEOUT;
-	unsigned long flags;
 	int ret, err = 0;
 	void *xa_ret;
 
@@ -233,9 +231,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
 	/* erase xarray entry to prevent looking up
 	 * the pool elem from its index
 	 */
-	xa_lock_irqsave(xa, flags);
-	xa_ret = __xa_erase(xa, elem->index);
-	xa_unlock_irqrestore(xa, flags);
+	xa_ret = xa_erase(xa, elem->index);
 	WARN_ON(xa_err(xa_ret));
 
 	/* if this is the last call to rxe_put complete the
@@ -280,7 +276,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
 		pool->cleanup(elem);
 
 	if (pool->type == RXE_TYPE_MR)
-		kfree(elem->obj);
+		kfree_rcu(elem->obj);
 
 	atomic_dec(&pool->num_elem);
 
@@ -299,12 +295,8 @@ int __rxe_put(struct rxe_pool_elem *elem)
 
 void __rxe_finalize(struct rxe_pool_elem *elem)
 {
-	struct xarray *xa = &elem->pool->xa;
-	unsigned long flags;
-	void *ret;
-
-	xa_lock_irqsave(xa, flags);
-	ret = __xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
-	xa_unlock_irqrestore(xa, flags);
-	WARN_ON(xa_err(ret));
+	void *xa_ret;
+
+	xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
+	WARN_ON(xa_err(xa_ret));
 }
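A note on the kfree() -> kfree_rcu() change for MR objects above: once lookups run under rcu_read_lock() rather than the xarray spinlock, a reader may still be inspecting an element it loaded just before xa_erase(), so the memory cannot be reused until a grace period has passed. Below is a minimal sketch of that writer-side ordering, again with hypothetical names building on the example_elem sketch earlier; it uses the two-argument kfree_rcu() form with an embedded rcu_head, whereas the patch itself uses the single-argument form.

/* Illustrative only: the object embeds its pool element, as the rxe MR
 * does, plus an rcu_head so kfree_rcu() can defer the free.
 * Assumes the includes and example_elem type from the sketch above.
 */
struct example_obj {
	struct example_elem elem;	/* embedded pool element */
	struct rcu_head rcu;
};

static void example_destroy(struct xarray *xa, unsigned long index,
			    struct example_obj *obj)
{
	/* Erase the index first so new lookups can no longer find it. */
	xa_erase(xa, index);

	/* A reader that called xa_load() before the erase may still be
	 * touching obj->elem.ref_cnt, so only free the memory after all
	 * current RCU read-side critical sections have completed.
	 */
	kfree_rcu(obj, rcu);
}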