author:    Haggai Eran <haggaie@mellanox.com>    2015-07-30 17:50:13 +0300
committer: Doug Ledford <dledford@redhat.com>    2015-08-30 15:48:15 -0400
commit:    5aa44bb90f047662c12c44be1b6de454658632d0 (patch)
tree:      ef9f18b75a5794c055e5fa8c8fd3a38ae2d1e231 /drivers/infiniband
parent:    RDMA/Core: remove rdma_cap_read_multi_sge() helper (diff)
IB/core: Add rwsem to allow reading device list or client list
Currently the RDMA subsystem's device list and client list are protected by a single mutex. This prevents adding user-facing APIs that iterate these lists, since using them may cause a deadlock. The patch attempts to solve this problem by adding a read-write semaphore to protect the lists. Readers now don't need the mutex, and are safe just by read-locking the semaphore.

The ib_register_device, ib_register_client, ib_unregister_device, and ib_unregister_client functions are modified to lock the semaphore for write during their respective list modifications. Also, in order to make sure client callbacks are called only between add() and remove() calls, the code is changed to add items to the lists only after the add() calls and to remove them from the lists before the remove() calls.

This patch attempts to solve a similar need [1] that was seen in the RoCE v2 patch series.

[1] http://www.spinics.net/lists/linux-rdma/msg24733.html

Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Cc: Matan Barak <matanb@mellanox.com>
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
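To illustrate the pattern this enables (not part of the patch): a hypothetical helper inside drivers/infiniband/core/device.c could now walk device_list as a pure reader by taking lists_rwsem for read, without touching device_mutex and therefore without risking a deadlock against the registration paths. The helper name and body below are illustrative only, a minimal sketch assuming the static device_list and lists_rwsem declared in this file.

static int count_registered_devices(void)
{
	struct ib_device *device;
	int count = 0;

	/* Readers only need lists_rwsem; device_mutex is not taken. */
	down_read(&lists_rwsem);
	list_for_each_entry(device, &device_list, core_list)
		count++;
	up_read(&lists_rwsem);

	return count;
}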
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/device.c | 40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9567756ca4f9..0c8fa781538b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -55,17 +55,24 @@ struct ib_client_data {
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
+/* The device_list and client_list contain devices and clients after their
+ * registration has completed, and the devices and clients are removed
+ * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
/*
- * device_mutex protects access to both device_list and client_list.
- * There's no real point to using multiple locks or something fancier
- * like an rwsem: we always access both lists, and we're always
- * modifying one list or the other list. In any case this is not a
- * hot path so there's no point in trying to optimize.
+ * device_mutex and lists_rwsem protect access to both device_list and
+ * client_list. device_mutex protects writer access by device and client
+ * registration / de-registration. lists_rwsem protects reader access to
+ * these lists. Iterators of these lists must lock it for read, while updates
+ * to the lists must be done with a write lock. A special case is when the
+ * device_mutex is locked. In this case locking the lists for read access is
+ * not necessary as the device_mutex implies it.
*/
static DEFINE_MUTEX(device_mutex);
+static DECLARE_RWSEM(lists_rwsem);
+
static int ib_device_check_mandatory(struct ib_device *device)
{
@@ -305,8 +312,6 @@ int ib_register_device(struct ib_device *device,
goto out;
}
- list_add_tail(&device->core_list, &device_list);
-
device->reg_state = IB_DEV_REGISTERED;
{
@@ -317,7 +322,10 @@ int ib_register_device(struct ib_device *device,
client->add(device);
}
- out:
+ down_write(&lists_rwsem);
+ list_add_tail(&device->core_list, &device_list);
+ up_write(&lists_rwsem);
+out:
mutex_unlock(&device_mutex);
return ret;
}
@@ -337,12 +345,14 @@ void ib_unregister_device(struct ib_device *device)
mutex_lock(&device_mutex);
+ down_write(&lists_rwsem);
+ list_del(&device->core_list);
+ up_write(&lists_rwsem);
+
list_for_each_entry_reverse(client, &client_list, list)
if (client->remove)
client->remove(device);
- list_del(&device->core_list);
-
mutex_unlock(&device_mutex);
ib_device_unregister_sysfs(device);
@@ -375,11 +385,14 @@ int ib_register_client(struct ib_client *client)
mutex_lock(&device_mutex);
- list_add_tail(&client->list, &client_list);
list_for_each_entry(device, &device_list, core_list)
if (client->add && !add_client_context(device, client))
client->add(device);
+ down_write(&lists_rwsem);
+ list_add_tail(&client->list, &client_list);
+ up_write(&lists_rwsem);
+
mutex_unlock(&device_mutex);
return 0;
@@ -402,6 +415,10 @@ void ib_unregister_client(struct ib_client *client)
mutex_lock(&device_mutex);
+ down_write(&lists_rwsem);
+ list_del(&client->list);
+ up_write(&lists_rwsem);
+
list_for_each_entry(device, &device_list, core_list) {
if (client->remove)
client->remove(device);
@@ -414,7 +431,6 @@ void ib_unregister_client(struct ib_client *client)
}
spin_unlock_irqrestore(&device->client_data_lock, flags);
}
- list_del(&client->list);
mutex_unlock(&device_mutex);
}
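For context, a minimal sketch of a client module that would go through the re-ordered registration paths above. The module and callback names are illustrative and not part of the patch; it assumes the single-argument add()/remove() callback signatures visible in this diff (client->add(device) and client->remove(device)). With this change, the client appears on client_list only after its add() callbacks have run for existing devices, and it is taken off the list before its remove() callbacks are invoked, so readers of the list never see a client outside that window.

#include <linux/module.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: names below are not from the patch. */
static void sketch_add_one(struct ib_device *device)
{
	pr_info("sketch: device %s added\n", device->name);
}

static void sketch_remove_one(struct ib_device *device)
{
	pr_info("sketch: device %s removed\n", device->name);
}

static struct ib_client sketch_client = {
	.name   = "sketch_client",
	.add    = sketch_add_one,
	.remove = sketch_remove_one,
};

static int __init sketch_init(void)
{
	/* ib_register_client() calls sketch_add_one() for each existing
	 * device, then (after this patch) adds the client to client_list. */
	return ib_register_client(&sketch_client);
}

static void __exit sketch_exit(void)
{
	/* ib_unregister_client() removes the client from client_list first,
	 * then calls sketch_remove_one() for each device. */
	ib_unregister_client(&sketch_client);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");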