aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/staging/greybus/connection.c
diff options
context:
space:
mode:
authorJeffrey Carlyle <jcarlyle@google.com>2016-06-24 08:58:56 +0530
committerGreg Kroah-Hartman <gregkh@google.com>2016-06-24 16:06:23 -0700
commit2d466c23c64f7556d0a184a1f02b2c8a23edaf5e (patch)
tree5aff4fe579f0035d1be7cc33d1fedee7cbc36b0a /drivers/staging/greybus/connection.c
parentgreybus: bootrom: Wait for 10 seconds for mode-switch (diff)
downloadlinux-dev-2d466c23c64f7556d0a184a1f02b2c8a23edaf5e.tar.xz
linux-dev-2d466c23c64f7556d0a184a1f02b2c8a23edaf5e.zip
greybus: connection: switch to using spin_lock_irqsave/spin_unlock_irqrestore exclusively
We know that it is a bad idea to explicitly enable IRQs when we don't know if they were already off before we disabled them, so switch to the state-saving _irqsave and _irqrestore functions. Ultimately, we need to review places in the Greybus drivers where IRQs are disabled and remove unnecessary instances. This is only an interim step. This code will never run from hard irq context, as it is already taking a mutex in the path. Testing done: booted EVT2.0, ran suspend/resume test app with a period of 20s for a few dozen cycles. Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Jeffrey Carlyle <jcarlyle@google.com> Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'drivers/staging/greybus/connection.c')
-rw-r--r--drivers/staging/greybus/connection.c53
1 files changed, 30 insertions, 23 deletions
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
index 810c61807c0a..20a87b9ee674 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/staging/greybus/connection.c
@@ -572,7 +572,7 @@ static int gb_connection_ping(struct gb_connection *connection)
* DISCONNECTING.
*/
static void gb_connection_cancel_operations(struct gb_connection *connection,
- int errno)
+ int errno, unsigned long *flags)
__must_hold(&connection->lock)
{
struct gb_operation *operation;
@@ -581,7 +581,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
operation = list_last_entry(&connection->operations,
struct gb_operation, links);
gb_operation_get(operation);
- spin_unlock_irq(&connection->lock);
+ spin_unlock_irqrestore(&connection->lock, *flags);
if (gb_operation_is_incoming(operation))
gb_operation_cancel_incoming(operation, errno);
@@ -590,7 +590,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
gb_operation_put(operation);
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, *flags);
}
}
@@ -601,7 +601,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
*/
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
- int errno)
+ int errno, unsigned long *flags)
__must_hold(&connection->lock)
{
struct gb_operation *operation;
@@ -621,13 +621,13 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
if (!incoming)
break;
- spin_unlock_irq(&connection->lock);
+ spin_unlock_irqrestore(&connection->lock, *flags);
/* FIXME: flush, not cancel? */
gb_operation_cancel_incoming(operation, errno);
gb_operation_put(operation);
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, *flags);
}
}
@@ -644,15 +644,16 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
int ret;
+ unsigned long flags;
/* Handle ENABLED_TX -> ENABLED transitions. */
if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
if (!(connection->handler && rx))
return 0;
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, flags);
connection->state = GB_CONNECTION_STATE_ENABLED;
- spin_unlock_irq(&connection->lock);
+ spin_unlock_irqrestore(&connection->lock, flags);
return 0;
}
@@ -669,12 +670,12 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
if (ret)
goto err_svc_connection_destroy;
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, flags);
if (connection->handler && rx)
connection->state = GB_CONNECTION_STATE_ENABLED;
else
connection->state = GB_CONNECTION_STATE_ENABLED_TX;
- spin_unlock_irq(&connection->lock);
+ spin_unlock_irqrestore(&connection->lock, flags);
ret = gb_connection_control_connected(connection);
if (ret)
@@ -685,10 +686,10 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
err_control_disconnecting:
gb_connection_control_disconnecting(connection);
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, flags);
connection->state = GB_CONNECTION_STATE_DISCONNECTING;
- gb_connection_cancel_operations(connection, -ESHUTDOWN);
- spin_unlock_irq(&connection->lock);
+ gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
+ spin_unlock_irqrestore(&connection->lock, flags);
/* Transmit queue should already be empty. */
gb_connection_hd_cport_flush(connection);
@@ -754,16 +755,18 @@ EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
void gb_connection_disable_rx(struct gb_connection *connection)
{
+ unsigned long flags;
+
mutex_lock(&connection->mutex);
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, flags);
if (connection->state != GB_CONNECTION_STATE_ENABLED) {
- spin_unlock_irq(&connection->lock);
+ spin_unlock_irqrestore(&connection->lock, flags);
goto out_unlock;
}
connection->state = GB_CONNECTION_STATE_ENABLED_TX;
- gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
- spin_unlock_irq(&connection->lock);
+ gb_connection_flush_incoming_operations(connection, -ESHUTDOWN, &flags);
+ spin_unlock_irqrestore(&connection->lock, flags);
trace_gb_connection_disable(connection);
@@ -786,6 +789,8 @@ void gb_connection_mode_switch_complete(struct gb_connection *connection)
void gb_connection_disable(struct gb_connection *connection)
{
+ unsigned long flags;
+
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -795,10 +800,10 @@ void gb_connection_disable(struct gb_connection *connection)
gb_connection_control_disconnecting(connection);
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, flags);
connection->state = GB_CONNECTION_STATE_DISCONNECTING;
- gb_connection_cancel_operations(connection, -ESHUTDOWN);
- spin_unlock_irq(&connection->lock);
+ gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
+ spin_unlock_irqrestore(&connection->lock, flags);
gb_connection_hd_cport_flush(connection);
@@ -825,6 +830,8 @@ EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
+ unsigned long flags;
+
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -832,10 +839,10 @@ void gb_connection_disable_forced(struct gb_connection *connection)
trace_gb_connection_disable(connection);
- spin_lock_irq(&connection->lock);
+ spin_lock_irqsave(&connection->lock, flags);
connection->state = GB_CONNECTION_STATE_DISABLED;
- gb_connection_cancel_operations(connection, -ESHUTDOWN);
- spin_unlock_irq(&connection->lock);
+ gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
+ spin_unlock_irqrestore(&connection->lock, flags);
gb_connection_hd_cport_flush(connection);
gb_connection_hd_cport_features_disable(connection);