path: root/drivers/staging/greybus/connection.c
author		Viresh Kumar <viresh.kumar@linaro.org>	2016-06-23 23:23:06 +0530
committer	Greg Kroah-Hartman <gregkh@google.com>	2016-06-23 15:30:45 -0700
commit		6f7f2ae5df786bf9ced3247fda51a0a7aeb9cd0c (patch)
tree		16168a18dd7d0ae0f3c081079cf7e81853c0bdec /drivers/staging/greybus/connection.c
parent		greybus: es2.c: don't use spin_lock_irq() (diff)
download	linux-dev-6f7f2ae5df786bf9ced3247fda51a0a7aeb9cd0c.tar.xz
		linux-dev-6f7f2ae5df786bf9ced3247fda51a0a7aeb9cd0c.zip
greybus: gb_connections_lock: don't use spin_lock_irq()
The spin_[un]lock_irq() routines should be used carefully, as things can go wrong if they are mixed with spin_lock_irqsave() or other variants. The main problem is that the spin_[un]lock_irq() routines don't check whether IRQs are already disabled/enabled on the local CPU, so spin_unlock_irq(), for example, will forcefully enable interrupts. This may not work well if other code was relying on interrupts staying disabled. Use spin_lock_irqsave() and spin_unlock_irqrestore() instead.

This patch doesn't claim to fix the JIRA completely, but the issue, which was quite easy to reproduce before, became harder to reproduce for some iterations after this change.

Tested on EVT 2.0 with lots of debug patches to the kernel and greybus.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
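A minimal sketch of the hazard described above, for illustration only and not part of the patch; the lock and function names (example_lock, inner_path, safe_inner_path) are hypothetical:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Problematic pattern: spin_unlock_irq() re-enables interrupts
 * unconditionally, even if the caller entered with them disabled. */
static void inner_path(void)
{
	spin_lock_irq(&example_lock);
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);		/* IRQs forced back on here */
}

/* Safer pattern: spin_lock_irqsave() records the current IRQ state and
 * spin_unlock_irqrestore() puts it back exactly as it was. */
static void safe_inner_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

If inner_path() were called from a context that had already disabled interrupts, the spin_unlock_irq() would turn them back on behind the caller's back; the irqsave/irqrestore pair avoids that.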
Diffstat (limited to 'drivers/staging/greybus/connection.c')
-rw-r--r--	drivers/staging/greybus/connection.c	11
1 file changed, 7 insertions, 4 deletions
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
index 3a17db91a167..810c61807c0a 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/staging/greybus/connection.c
@@ -150,6 +150,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
 				unsigned long flags)
 {
 	struct gb_connection *connection;
+	unsigned long irqflags;
 	int ret;
 
 	mutex_lock(&gb_connection_mutex);
@@ -200,7 +201,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
 
 	gb_connection_init_name(connection);
 
-	spin_lock_irq(&gb_connections_lock);
+	spin_lock_irqsave(&gb_connections_lock, irqflags);
 	list_add(&connection->hd_links, &hd->connections);
 
 	if (bundle)
@@ -208,7 +209,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
 	else
 		INIT_LIST_HEAD(&connection->bundle_links);
 
-	spin_unlock_irq(&gb_connections_lock);
+	spin_unlock_irqrestore(&gb_connections_lock, irqflags);
 
 	mutex_unlock(&gb_connection_mutex);
@@ -849,6 +850,8 @@ EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
 /* Caller must have disabled the connection before destroying it. */
 void gb_connection_destroy(struct gb_connection *connection)
 {
+	unsigned long flags;
+
 	if (!connection)
 		return;
 
@@ -857,10 +860,10 @@ void gb_connection_destroy(struct gb_connection *connection)
 
 	mutex_lock(&gb_connection_mutex);
 
-	spin_lock_irq(&gb_connections_lock);
+	spin_lock_irqsave(&gb_connections_lock, flags);
 	list_del(&connection->bundle_links);
 	list_del(&connection->hd_links);
-	spin_unlock_irq(&gb_connections_lock);
+	spin_unlock_irqrestore(&gb_connections_lock, flags);
 
 	destroy_workqueue(connection->wq);