author	Jason Gunthorpe <jgg@nvidia.com>	2020-11-04 17:40:59 -0400
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-11-12 12:31:27 -0400
commit	eb73060b971aa04e4f7421b8c9c0363918608b72 (patch)
tree	0949b05a44dfdb947ab29e1dda7772bbd9d57135 /drivers/infiniband/core/cm.c
parent	Linux 5.10-rc3 (diff)
RDMA/cm: Make the local_id_table xarray non-irq
The xarray is never mutated from an IRQ handler, only from work queues under a spinlock_irq. Thus there is no reason for it to be an IRQ type xarray. This was copied over from the original IDR code, but the recent rework put the xarray inside another spinlock_irq, which will unbalance the unlocking.

Fixes: c206f8bad15d ("RDMA/cm: Make it clearer how concurrency works in cm_req_handler()")
Link: https://lore.kernel.org/r/0-v1-808b6da3bd3f+1857-cm_xarray_no_irq_jgg@nvidia.com
Reported-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
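As background for the change, here is a minimal, hypothetical sketch of the locking pattern the patch relies on (it is not part of cm.c; the names example_ids, example_lock, example_next and example_insert() are made up for illustration). An xarray initialized without XA_FLAGS_LOCK_IRQ takes its internal xa_lock with plain spin_lock(), so the plain xa_alloc_cyclic()/xa_store()/xa_erase() helpers nest correctly under a caller that has already disabled interrupts with spin_lock_irq(); the _irq helpers would end with spin_unlock_irq() and re-enable interrupts while the outer lock is still held.

/*
 * Hypothetical sketch, not drivers/infiniband/core/cm.c: an allocating
 * xarray with the default (non-IRQ) lock class whose writers all run in
 * process context, some of them under an IRQ-disabling spinlock held by
 * the caller.
 */
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(example_ids, XA_FLAGS_ALLOC);	/* no XA_FLAGS_LOCK_IRQ */
static DEFINE_SPINLOCK(example_lock);
static u32 example_next;

static int example_insert(void *entry, u32 *out_id)
{
	int ret;

	/* May sleep, so it runs outside any spinlock (as in cm_alloc_id_priv()). */
	ret = xa_alloc_cyclic(&example_ids, out_id, NULL, xa_limit_32b,
			      &example_next, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/*
	 * Publish the real pointer under an IRQ-disabling lock.  Plain
	 * xa_store() grabs xa_lock with spin_lock(), which nests fine here;
	 * xa_store_irq() would finish with spin_unlock_irq() and re-enable
	 * interrupts while example_lock is still held.
	 */
	spin_lock_irq(&example_lock);
	xa_store(&example_ids, *out_id, entry, GFP_ATOMIC);
	spin_unlock_irq(&example_lock);
	return 0;
}

The same reasoning applies to the xa_init_flags() hunk at the end of the patch: dropping XA_FLAGS_LOCK_IRQ selects the plain lock class, matching how cm.c actually serializes its writers.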
Diffstat (limited to 'drivers/infiniband/core/cm.c')
-rw-r--r--	drivers/infiniband/core/cm.c	|	12	++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 5740d1ba3568..012156624b82 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -859,8 +859,8 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
 	atomic_set(&cm_id_priv->work_count, -1);
 	refcount_set(&cm_id_priv->refcount, 1);
 
-	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
-				  &cm.local_id_next, GFP_KERNEL);
+	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
+			      &cm.local_id_next, GFP_KERNEL);
 	if (ret < 0)
 		goto error;
 	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
@@ -878,8 +878,8 @@ error:
  */
 static void cm_finalize_id(struct cm_id_private *cm_id_priv)
 {
-	xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
-		     cm_id_priv, GFP_KERNEL);
+	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+		 cm_id_priv, GFP_ATOMIC);
 }
 
 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
@@ -1169,7 +1169,7 @@ retest:
 	spin_unlock(&cm.lock);
 	spin_unlock_irq(&cm_id_priv->lock);
 
-	xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
+	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
 	cm_deref_id(cm_id_priv);
 	wait_for_completion(&cm_id_priv->comp);
 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -4482,7 +4482,7 @@ static int __init ib_cm_init(void)
 	cm.remote_id_table = RB_ROOT;
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
-	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
 	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
 	INIT_LIST_HEAD(&cm.timewait_list);