aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ipa/ipa_clock.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ipa/ipa_clock.c')
-rw-r--r--drivers/net/ipa/ipa_clock.c188
1 file changed, 123 insertions(+), 65 deletions(-)
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 69ef6ea41e61..6df66c574d59 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -4,14 +4,16 @@
* Copyright (C) 2018-2021 Linaro Ltd.
*/
-#include <linux/refcount.h>
-#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
#include "ipa.h"
#include "ipa_clock.h"
+#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"
@@ -43,17 +45,27 @@ struct ipa_interconnect {
};
/**
+ * enum ipa_power_flag - IPA power flags
+ * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
+ * @IPA_POWER_FLAG_COUNT: Number of defined power flags
+ */
+enum ipa_power_flag {
+ IPA_POWER_FLAG_RESUMED,
+ IPA_POWER_FLAG_COUNT, /* Last; not a flag */
+};
+
+/**
* struct ipa_clock - IPA clocking information
- * @count: Clocking reference count
- * @mutex: Protects clock enable/disable
+ * @dev: IPA device pointer
* @core: IPA core clock
+ * @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
struct ipa_clock {
- refcount_t count;
- struct mutex mutex; /* protects clock enable/disable */
+ struct device *dev;
struct clk *core;
+ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct ipa_interconnect *interconnect;
};
@@ -144,8 +156,12 @@ static int ipa_interconnect_enable(struct ipa *ipa)
ret = icc_set_bw(interconnect->path,
interconnect->average_bandwidth,
interconnect->peak_bandwidth);
- if (ret)
+ if (ret) {
+ dev_err(&ipa->pdev->dev,
+ "error %d enabling %s interconnect\n",
+ ret, icc_get_name(interconnect->path));
goto out_unwind;
+ }
interconnect++;
}
@@ -159,10 +175,11 @@ out_unwind:
}
/* To disable an interconnect, we just set its bandwidth to 0 */
-static void ipa_interconnect_disable(struct ipa *ipa)
+static int ipa_interconnect_disable(struct ipa *ipa)
{
struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
+ struct device *dev = &ipa->pdev->dev;
int result = 0;
u32 count;
int ret;
@@ -172,13 +189,16 @@ static void ipa_interconnect_disable(struct ipa *ipa)
while (count--) {
interconnect--;
ret = icc_set_bw(interconnect->path, 0, 0);
- if (ret && !result)
- result = ret;
+ if (ret) {
+ dev_err(dev, "error %d disabling %s interconnect\n",
+ ret, icc_get_name(interconnect->path));
+ /* Try to disable all; record only the first error */
+ if (!result)
+ result = ret;
+ }
}
- if (result)
- dev_err(&ipa->pdev->dev,
- "error %d disabling IPA interconnects\n", ret);
+ return result;
}
/* Turn on IPA clocks, including interconnects */
@@ -191,78 +211,74 @@ static int ipa_clock_enable(struct ipa *ipa)
return ret;
ret = clk_prepare_enable(ipa->clock->core);
- if (ret)
- ipa_interconnect_disable(ipa);
+ if (ret) {
+ dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
+ (void)ipa_interconnect_disable(ipa);
+ }
return ret;
}
/* Inverse of ipa_clock_enable() */
-static void ipa_clock_disable(struct ipa *ipa)
+static int ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
- ipa_interconnect_disable(ipa);
-}
-/* Get an IPA clock reference, but only if the reference count is
- * already non-zero. Returns true if the additional reference was
- * added successfully, or false otherwise.
- */
-bool ipa_clock_get_additional(struct ipa *ipa)
-{
- return refcount_inc_not_zero(&ipa->clock->count);
+ return ipa_interconnect_disable(ipa);
}
-/* Get an IPA clock reference. If the reference count is non-zero, it is
- * incremented and return is immediate. Otherwise it is checked again
- * under protection of the mutex, and if appropriate the IPA clock
- * is enabled.
- *
- * Incrementing the reference count is intentionally deferred until
- * after the clock is running and endpoints are resumed.
- */
-void ipa_clock_get(struct ipa *ipa)
+static int ipa_runtime_suspend(struct device *dev)
{
- struct ipa_clock *clock = ipa->clock;
- int ret;
+ struct ipa *ipa = dev_get_drvdata(dev);
- /* If the clock is running, just bump the reference count */
- if (ipa_clock_get_additional(ipa))
- return;
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete) {
+ __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags);
+ ipa_endpoint_suspend(ipa);
+ gsi_suspend(&ipa->gsi);
+ }
- /* Otherwise get the mutex and check again */
- mutex_lock(&clock->mutex);
+ return ipa_clock_disable(ipa);
+}
- /* A reference might have been added before we got the mutex. */
- if (ipa_clock_get_additional(ipa))
- goto out_mutex_unlock;
+static int ipa_runtime_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+ int ret;
ret = ipa_clock_enable(ipa);
- if (ret) {
- dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
- goto out_mutex_unlock;
+ if (WARN_ON(ret < 0))
+ return ret;
+
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete) {
+ gsi_resume(&ipa->gsi);
+ ipa_endpoint_resume(ipa);
}
- refcount_set(&clock->count, 1);
+ return 0;
+}
-out_mutex_unlock:
- mutex_unlock(&clock->mutex);
+static int ipa_runtime_idle(struct device *dev)
+{
+ return -EAGAIN;
}
-/* Attempt to remove an IPA clock reference. If this represents the
- * last reference, disable the IPA clock under protection of the mutex.
+/* Get an IPA clock reference. If the reference count is non-zero, it is
+ * incremented and return is immediate. Otherwise the IPA clock is
+ * enabled.
*/
-void ipa_clock_put(struct ipa *ipa)
+int ipa_clock_get(struct ipa *ipa)
{
- struct ipa_clock *clock = ipa->clock;
-
- /* If this is not the last reference there's nothing more to do */
- if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
- return;
-
- ipa_clock_disable(ipa);
+ return pm_runtime_get_sync(&ipa->pdev->dev);
+}
- mutex_unlock(&clock->mutex);
+/* Attempt to remove an IPA clock reference. If this represents the
+ * last reference, disable the IPA clock.
+ */
+int ipa_clock_put(struct ipa *ipa)
+{
+ return pm_runtime_put(&ipa->pdev->dev);
}
/* Return the current IPA core clock rate */
@@ -271,6 +287,40 @@ u32 ipa_clock_rate(struct ipa *ipa)
return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
}
+/**
+ * ipa_suspend_handler() - Handle the suspend IPA interrupt
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt type (unused)
+ *
+ * If an RX endpoint is suspended, and the IPA has a packet destined for
+ * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
+ * that it should resume the endpoint. If we get one of these interrupts
+ * we just wake up the system.
+ */
+static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ /* Just report the event, and let system resume handle the rest.
+ * More than one endpoint could signal this; if so, ignore
+ * all but the first.
+ */
+ if (!test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags))
+ pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
+
+ /* Acknowledge/clear the suspend interrupt on all endpoints */
+ ipa_interrupt_suspend_clear_all(ipa->interrupt);
+}
+
+void ipa_power_setup(struct ipa *ipa)
+{
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
+ ipa_suspend_handler);
+}
+
+void ipa_power_teardown(struct ipa *ipa)
+{
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+}
+
/* Initialize IPA clocking */
struct ipa_clock *
ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
@@ -298,6 +348,7 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
ret = -ENOMEM;
goto err_clk_put;
}
+ clock->dev = dev;
clock->core = clk;
clock->interconnect_count = data->interconnect_count;
@@ -305,8 +356,8 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
if (ret)
goto err_kfree;
- mutex_init(&clock->mutex);
- refcount_set(&clock->count, 0);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_enable(dev);
return clock;
@@ -323,9 +374,16 @@ void ipa_clock_exit(struct ipa_clock *clock)
{
struct clk *clk = clock->core;
- WARN_ON(refcount_read(&clock->count) != 0);
- mutex_destroy(&clock->mutex);
+ pm_runtime_disable(clock->dev);
ipa_interconnect_exit(clock);
kfree(clock);
clk_put(clk);
}
+
+const struct dev_pm_ops ipa_pm_ops = {
+ .suspend = pm_runtime_force_suspend,
+ .resume = pm_runtime_force_resume,
+ .runtime_suspend = ipa_runtime_suspend,
+ .runtime_resume = ipa_runtime_resume,
+ .runtime_idle = ipa_runtime_idle,
+};