Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile            |    1
-rw-r--r--  drivers/base/power/domain.c            |  226
-rw-r--r--  drivers/base/power/domain_governor.c   |   73
-rw-r--r--  drivers/base/power/generic_ops.c       |   23
-rw-r--r--  drivers/base/power/main.c              |   61
-rw-r--r--  drivers/base/power/opp/Makefile        |    4
-rw-r--r--  drivers/base/power/opp/core.c          | 1747
-rw-r--r--  drivers/base/power/opp/cpu.c           |  236
-rw-r--r--  drivers/base/power/opp/debugfs.c       |  249
-rw-r--r--  drivers/base/power/opp/of.c            |  633
-rw-r--r--  drivers/base/power/opp/opp.h           |  222
-rw-r--r--  drivers/base/power/qos.c               |    5
-rw-r--r--  drivers/base/power/runtime.c           |   13
-rw-r--r--  drivers/base/power/sysfs.c             |   53
-rw-r--r--  drivers/base/power/wakeup.c            |   11
15 files changed, 267 insertions(+), 3290 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 29cd71d8b360..e1bb691cf8f1 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -2,7 +2,6 @@
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e8ca5e2cf1e5..0c80bea05bcb 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -124,6 +124,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
+#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -237,6 +238,95 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
+/**
+ * dev_pm_genpd_set_performance_state- Set performance state of device's power
+ * domain.
+ *
+ * @dev: Device for which the performance-state needs to be set.
+ * @state: Target performance state of the device. This can be set as 0 when the
+ * device doesn't have any performance state constraints left (And so
+ * the device wouldn't participate anymore to find the target
+ * performance state of the genpd).
+ *
+ * It is assumed that the users guarantee that the genpd wouldn't be detached
+ * while this routine is getting called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data, *pd_data;
+ struct pm_domain_data *pdd;
+ unsigned int prev;
+ int ret = 0;
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -ENODEV;
+
+ if (unlikely(!genpd->set_performance_state))
+ return -EINVAL;
+
+ if (unlikely(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ genpd_lock(genpd);
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ prev = gpd_data->performance_state;
+ gpd_data->performance_state = state;
+
+ /* New requested state is same as Max requested state */
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /* New requested state is higher than Max requested state */
+ if (state > genpd->performance_state)
+ goto update_state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /*
+ * We aren't propagating performance state changes of a subdomain to its
+ * masters as we don't have hardware that needs it. Over that, the
+ * performance states of subdomain and its masters may not have
+ * one-to-one mapping and would require additional information. We can
+ * get back to this once we have hardware that needs it. For that
+ * reason, we don't have to consider performance state of the subdomains
+ * of genpd here.
+ */
+
+update_state:
+ if (genpd_status_on(genpd)) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret) {
+ gpd_data->performance_state = prev;
+ goto unlock;
+ }
+ }
+
+ genpd->performance_state = state;
+
+unlock:
+ genpd_unlock(genpd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -256,6 +346,15 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+
+ if (unlikely(genpd->set_performance_state)) {
+ ret = genpd->set_performance_state(genpd, genpd->performance_state);
+ if (ret) {
+ pr_warn("%s: Failed to set performance state %d (%d)\n",
+ genpd->name, genpd->performance_state, ret);
+ }
+ }
+
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -346,9 +445,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
- stat = dev_pm_qos_flags(pdd->dev,
- PM_QOS_FLAG_NO_POWER_OFF
- | PM_QOS_FLAG_REMOTE_WAKEUP);
+ stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
@@ -749,11 +846,7 @@ late_initcall(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-/**
- * pm_genpd_present - Check if the given PM domain has been initialized.
- * @genpd: PM domain to check.
- */
-static bool pm_genpd_present(const struct generic_pm_domain *genpd)
+static bool genpd_present(const struct generic_pm_domain *genpd)
{
const struct generic_pm_domain *gpd;
@@ -771,12 +864,6 @@ static bool pm_genpd_present(const struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_SLEEP
-static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
-}
-
/**
* genpd_sync_power_off - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -863,7 +950,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
* @genpd: PM domain the device belongs to.
*
* There are two cases in which a device that can wake up the system from sleep
- * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * states should be resumed by genpd_prepare(): (1) if the device is enabled
* to wake up the system and it has to remain active for this purpose while the
* system is in the sleep state and (2) if the device is not enabled to wake up
* the system from sleep states and it generally doesn't generate wakeup signals
@@ -881,12 +968,12 @@ static bool resume_needed(struct device *dev,
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd_dev_active_wakeup(genpd, dev);
+ active_wakeup = genpd_is_active_wakeup(genpd);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
/**
- * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
* Start a power transition of a device (during a system-wide power transition)
@@ -894,7 +981,7 @@ static bool resume_needed(struct device *dev,
* an object of type struct generic_pm_domain representing a PM domain
* consisting of I/O devices.
*/
-static int pm_genpd_prepare(struct device *dev)
+static int genpd_prepare(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
@@ -921,7 +1008,7 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd);
ret = pm_generic_prepare(dev);
- if (ret) {
+ if (ret < 0) {
genpd_lock(genpd);
genpd->prepared_count--;
@@ -929,7 +1016,8 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd);
}
- return ret;
+ /* Never return 1, as genpd don't cope with the direct_complete path. */
+ return ret >= 0 ? 0 : ret;
}
/**
@@ -950,7 +1038,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
if (poweroff)
@@ -975,13 +1063,13 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
}
/**
- * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int genpd_suspend_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -989,12 +1077,12 @@ static int pm_genpd_suspend_noirq(struct device *dev)
}
/**
- * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Restore power to the device's PM domain, if necessary, and start the device.
*/
-static int pm_genpd_resume_noirq(struct device *dev)
+static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1005,7 +1093,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
genpd_lock(genpd);
@@ -1024,7 +1112,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
}
/**
- * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+ * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
@@ -1032,7 +1120,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
-static int pm_genpd_freeze_noirq(struct device *dev)
+static int genpd_freeze_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1054,13 +1142,13 @@ static int pm_genpd_freeze_noirq(struct device *dev)
}
/**
- * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Start the device, unless power has been removed from the domain already
* before the system transition.
*/
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int genpd_thaw_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1081,14 +1169,14 @@ static int pm_genpd_thaw_noirq(struct device *dev)
}
/**
- * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
+ * genpd_poweroff_noirq - Completion of hibernation of device in an
* I/O PM domain.
* @dev: Device to poweroff.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_poweroff_noirq(struct device *dev)
+static int genpd_poweroff_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -1096,13 +1184,13 @@ static int pm_genpd_poweroff_noirq(struct device *dev)
}
/**
- * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+ * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
* Make sure the domain will be in the same power state as before the
* hibernation the system is resuming from and start the device if necessary.
*/
-static int pm_genpd_restore_noirq(struct device *dev)
+static int genpd_restore_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1139,7 +1227,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
}
/**
- * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * genpd_complete - Complete power transition of a device in a power domain.
* @dev: Device to complete the transition of.
*
* Complete a power transition of a device (during a system-wide power
@@ -1147,7 +1235,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
* domain member of an object of type struct generic_pm_domain representing
* a power domain consisting of I/O devices.
*/
-static void pm_genpd_complete(struct device *dev)
+static void genpd_complete(struct device *dev)
{
struct generic_pm_domain *genpd;
@@ -1180,7 +1268,7 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
struct generic_pm_domain *genpd;
genpd = dev_to_genpd(dev);
- if (!pm_genpd_present(genpd))
+ if (!genpd_present(genpd))
return;
if (suspend) {
@@ -1206,14 +1294,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else /* !CONFIG_PM_SLEEP */
-#define pm_genpd_prepare NULL
-#define pm_genpd_suspend_noirq NULL
-#define pm_genpd_resume_noirq NULL
-#define pm_genpd_freeze_noirq NULL
-#define pm_genpd_thaw_noirq NULL
-#define pm_genpd_poweroff_noirq NULL
-#define pm_genpd_restore_noirq NULL
-#define pm_genpd_complete NULL
+#define genpd_prepare NULL
+#define genpd_suspend_noirq NULL
+#define genpd_resume_noirq NULL
+#define genpd_freeze_noirq NULL
+#define genpd_thaw_noirq NULL
+#define genpd_poweroff_noirq NULL
+#define genpd_restore_noirq NULL
+#define genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
@@ -1239,7 +1327,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = -1;
+ gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
spin_lock_irq(&dev->power.lock);
@@ -1574,14 +1662,14 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
- genpd->domain.ops.prepare = pm_genpd_prepare;
- genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
- genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
- genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
- genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
- genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
- genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->domain.ops.prepare = genpd_prepare;
+ genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = genpd_resume_noirq;
+ genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
+ genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = genpd_restore_noirq;
+ genpd->domain.ops.complete = genpd_complete;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -1795,7 +1883,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
mutex_lock(&gpd_list_lock);
- if (pm_genpd_present(genpd)) {
+ if (genpd_present(genpd)) {
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (!ret) {
genpd->provider = &np->fwnode;
@@ -1831,7 +1919,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
for (i = 0; i < data->num_domains; i++) {
if (!data->domains[i])
continue;
- if (!pm_genpd_present(data->domains[i]))
+ if (!genpd_present(data->domains[i]))
goto error;
data->domains[i]->provider = &np->fwnode;
@@ -2274,7 +2362,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
-static struct dentry *pm_genpd_debugfs_dir;
+static struct dentry *genpd_debugfs_dir;
/*
* TODO: This function is a slightly modified version of rtpm_status_show
@@ -2302,8 +2390,8 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
seq_puts(s, p);
}
-static int pm_genpd_summary_one(struct seq_file *s,
- struct generic_pm_domain *genpd)
+static int genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *genpd)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2373,7 +2461,7 @@ static int genpd_summary_show(struct seq_file *s, void *data)
return -ERESTARTSYS;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- ret = pm_genpd_summary_one(s, genpd);
+ ret = genpd_summary_one(s, genpd);
if (ret)
break;
}
@@ -2559,23 +2647,23 @@ define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);
-static int __init pm_genpd_debug_init(void)
+static int __init genpd_debug_init(void)
{
struct dentry *d;
struct generic_pm_domain *genpd;
- pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+ genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
- if (!pm_genpd_debugfs_dir)
+ if (!genpd_debugfs_dir)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
+ genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
+ d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
if (!d)
return -ENOMEM;
@@ -2595,11 +2683,11 @@ static int __init pm_genpd_debug_init(void)
return 0;
}
-late_initcall(pm_genpd_debug_init);
+late_initcall(genpd_debug_init);
-static void __exit pm_genpd_debug_exit(void)
+static void __exit genpd_debug_exit(void)
{
- debugfs_remove_recursive(pm_genpd_debugfs_dir);
+ debugfs_remove_recursive(genpd_debugfs_dir);
}
-__exitcall(pm_genpd_debug_exit);
+__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 281f949c5ffe..99896fbf18e4 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,23 +14,29 @@
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
- s32 constraint_ns = -1;
+ s64 constraint_ns;
- if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+ /*
+ * Only take suspend-time QoS constraints of devices into
+ * account, because constraints updated after the device has
+ * been suspended are not guaranteed to be taken into account
+ * anyway. In order for them to take effect, the device has to
+ * be resumed and suspended again.
+ */
constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
-
- if (constraint_ns < 0) {
+ } else {
+ /*
+ * The child is not in a domain and there's no info on its
+ * suspend/resume latencies, so assume them to be negligible and
+ * take its current PM QoS constraint (that's the only thing
+ * known at this point anyway).
+ */
constraint_ns = dev_pm_qos_read_value(dev);
constraint_ns *= NSEC_PER_USEC;
}
- if (constraint_ns == 0)
- return 0;
- /*
- * constraint_ns cannot be negative here, because the device has been
- * suspended.
- */
- if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+ if (constraint_ns < *constraint_ns_p)
*constraint_ns_p = constraint_ns;
return 0;
@@ -58,12 +64,12 @@ static bool default_suspend_ok(struct device *dev)
}
td->constraint_changed = false;
td->cached_suspend_ok = false;
- td->effective_constraint_ns = -1;
+ td->effective_constraint_ns = 0;
constraint_ns = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
- if (constraint_ns < 0)
+ if (constraint_ns == 0)
return false;
constraint_ns *= NSEC_PER_USEC;
@@ -76,14 +82,32 @@ static bool default_suspend_ok(struct device *dev)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
- if (constraint_ns > 0) {
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
+ /* "No restriction", so the device is allowed to suspend. */
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ td->cached_suspend_ok = true;
+ } else if (constraint_ns == 0) {
+ /*
+ * This triggers if one of the children that don't belong to a
+ * domain has a zero PM QoS constraint and it's better not to
+ * suspend then. effective_constraint_ns is zero already and
+ * cached_suspend_ok is false, so bail out.
+ */
+ return false;
+ } else {
constraint_ns -= td->suspend_latency_ns +
td->resume_latency_ns;
- if (constraint_ns == 0)
+ /*
+ * effective_constraint_ns is zero already and cached_suspend_ok
+ * is false, so if the computed value is not positive, return
+ * right away.
+ */
+ if (constraint_ns <= 0)
return false;
+
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
}
- td->effective_constraint_ns = constraint_ns;
- td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
@@ -144,18 +168,13 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*/
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
- /* default_suspend_ok() need not be called before us. */
- if (constraint_ns < 0) {
- constraint_ns = dev_pm_qos_read_value(pdd->dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
- continue;
-
/*
- * constraint_ns cannot be negative here, because the device has
- * been suspended.
+ * Zero means "no suspend at all" and this runs only when all
+ * devices in the domain are suspended, so it must be positive.
*/
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
+ continue;
+
if (constraint_ns <= off_on_time_ns)
return false;
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 07c3c4a9522d..b2ed606265a8 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -9,7 +9,6 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/suspend.h>
#ifdef CONFIG_PM
/**
@@ -298,26 +297,4 @@ void pm_generic_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
}
-
-/**
- * pm_complete_with_resume_check - Complete a device power transition.
- * @dev: Device to handle.
- *
- * Complete a device power transition during a system-wide power transition and
- * optionally schedule a runtime resume of the device if the system resume in
- * progress has been initated by the platform firmware and the device had its
- * power.direct_complete flag set.
- */
-void pm_complete_with_resume_check(struct device *dev)
-{
- pm_generic_complete(dev);
- /*
- * If the device had been runtime-suspended before the system went into
- * the sleep state it is going out of and it has never been resumed till
- * now, resume it in case the firmware powered it up.
- */
- if (dev->power.direct_complete && pm_resume_via_firmware())
- pm_request_resume(dev);
-}
-EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 770b1539a083..db2f04415927 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -478,9 +478,9 @@ struct dpm_watchdog {
* There's not much we can do here to recover so panic() to
* capture a crash-dump in pstore.
*/
-static void dpm_watchdog_handler(unsigned long data)
+static void dpm_watchdog_handler(struct timer_list *t)
{
- struct dpm_watchdog *wd = (void *)data;
+ struct dpm_watchdog *wd = from_timer(wd, t, timer);
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
show_stack(wd->tsk, NULL);
@@ -500,11 +500,9 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
- init_timer_on_stack(timer);
+ timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
- timer->function = dpm_watchdog_handler;
- timer->data = (unsigned long)wd;
add_timer(timer);
}
@@ -528,7 +526,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * device_resume_noirq - Execute an "early resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@@ -848,16 +846,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Driver;
}
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Driver;
- } else if (dev->class->resume) {
- info = "legacy class ";
- callback = dev->class->resume;
- goto End;
- }
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
}
if (dev->bus) {
@@ -1083,7 +1075,7 @@ static pm_message_t resume_event(pm_message_t sleep_state)
}
/**
- * device_suspend_noirq - Execute a "late suspend" callback for given device.
+ * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1243,7 +1235,7 @@ int dpm_suspend_noirq(pm_message_t state)
}
/**
- * device_suspend_late - Execute a "late suspend" callback for given device.
+ * __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1445,7 +1437,7 @@ static void dpm_clear_suppliers_direct_complete(struct device *dev)
}
/**
- * device_suspend - Execute "suspend" callbacks for given device.
+ * __device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1508,17 +1500,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto Run;
}
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Run;
- } else if (dev->class->suspend) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_suspend(dev, state, dev->class->suspend,
- "legacy class ");
- goto End;
- }
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
}
if (dev->bus) {
@@ -1665,6 +1650,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
+ WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ !pm_runtime_enabled(dev));
+
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1713,7 +1701,9 @@ unlock:
* applies to suspend transitions, however.
*/
spin_lock_irq(&dev->power.lock);
- dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+ dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+ pm_runtime_suspended(dev) && ret > 0 &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
spin_unlock_irq(&dev->power.lock);
return 0;
}
@@ -1862,11 +1852,16 @@ void device_pm_check_callbacks(struct device *dev)
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
- (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
- !dev->class->suspend && !dev->class->resume)) &&
+ (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
spin_unlock_irq(&dev->power.lock);
}
+
+bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+{
+ return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ pm_runtime_status_suspended(dev);
+}
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
deleted file mode 100644
index e70ceb406fe9..000000000000
--- a/drivers/base/power/opp/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
-obj-y += core.o cpu.o
-obj-$(CONFIG_OF) += of.o
-obj-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
deleted file mode 100644
index a6de32530693..000000000000
--- a/drivers/base/power/opp/core.c
+++ /dev/null
@@ -1,1747 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- * Nishanth Menon
- * Romit Dasgupta
- * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/export.h>
-#include <linux/regulator/consumer.h>
-
-#include "opp.h"
-
-/*
- * The root of the list of all opp-tables. All opp_table structures branch off
- * from here, with each opp_table containing the list of opps it supports in
- * various states of availability.
- */
-LIST_HEAD(opp_tables);
-/* Lock to allow exclusive modification to the device and opp lists */
-DEFINE_MUTEX(opp_table_lock);
-
-static void dev_pm_opp_get(struct dev_pm_opp *opp);
-
-static struct opp_device *_find_opp_dev(const struct device *dev,
- struct opp_table *opp_table)
-{
- struct opp_device *opp_dev;
-
- list_for_each_entry(opp_dev, &opp_table->dev_list, node)
- if (opp_dev->dev == dev)
- return opp_dev;
-
- return NULL;
-}
-
-static struct opp_table *_find_opp_table_unlocked(struct device *dev)
-{
- struct opp_table *opp_table;
-
- list_for_each_entry(opp_table, &opp_tables, node) {
- if (_find_opp_dev(dev, opp_table)) {
- _get_opp_table_kref(opp_table);
-
- return opp_table;
- }
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-/**
- * _find_opp_table() - find opp_table struct using device pointer
- * @dev: device pointer used to lookup OPP table
- *
- * Search OPP table for one containing matching device.
- *
- * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
- * -EINVAL based on type of error.
- *
- * The callers must call dev_pm_opp_put_opp_table() after the table is used.
- */
-struct opp_table *_find_opp_table(struct device *dev)
-{
- struct opp_table *opp_table;
-
- if (IS_ERR_OR_NULL(dev)) {
- pr_err("%s: Invalid parameters\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- mutex_lock(&opp_table_lock);
- opp_table = _find_opp_table_unlocked(dev);
- mutex_unlock(&opp_table_lock);
-
- return opp_table;
-}
-
-/**
- * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
- * @opp: opp for which voltage has to be returned for
- *
- * Return: voltage in micro volt corresponding to the opp, else
- * return 0
- *
- * This is useful only for devices with single power supply.
- */
-unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
-{
- if (IS_ERR_OR_NULL(opp)) {
- pr_err("%s: Invalid parameters\n", __func__);
- return 0;
- }
-
- return opp->supplies[0].u_volt;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
-
-/**
- * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
- * @opp: opp for which frequency has to be returned for
- *
- * Return: frequency in hertz corresponding to the opp, else
- * return 0
- */
-unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
-{
- if (IS_ERR_OR_NULL(opp) || !opp->available) {
- pr_err("%s: Invalid parameters\n", __func__);
- return 0;
- }
-
- return opp->rate;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
-
-/**
- * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
- * @opp: opp for which turbo mode is being verified
- *
- * Turbo OPPs are not for normal use, and can be enabled (under certain
- * conditions) for short duration of times to finish high throughput work
- * quickly. Running on them for longer times may overheat the chip.
- *
- * Return: true if opp is turbo opp, else false.
- */
-bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
-{
- if (IS_ERR_OR_NULL(opp) || !opp->available) {
- pr_err("%s: Invalid parameters\n", __func__);
- return false;
- }
-
- return opp->turbo;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
-
-/**
- * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
- * @dev: device for which we do this operation
- *
- * Return: This function returns the max clock latency in nanoseconds.
- */
-unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
-{
- struct opp_table *opp_table;
- unsigned long clock_latency_ns;
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return 0;
-
- clock_latency_ns = opp_table->clock_latency_ns_max;
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return clock_latency_ns;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
-
-/**
- * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
- * @dev: device for which we do this operation
- *
- * Return: This function returns the max voltage latency in nanoseconds.
- */
-unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *opp;
- struct regulator *reg;
- unsigned long latency_ns = 0;
- int ret, i, count;
- struct {
- unsigned long min;
- unsigned long max;
- } *uV;
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return 0;
-
- count = opp_table->regulator_count;
-
- /* Regulator may not be required for the device */
- if (!count)
- goto put_opp_table;
-
- uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
- if (!uV)
- goto put_opp_table;
-
- mutex_lock(&opp_table->lock);
-
- for (i = 0; i < count; i++) {
- uV[i].min = ~0;
- uV[i].max = 0;
-
- list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (!opp->available)
- continue;
-
- if (opp->supplies[i].u_volt_min < uV[i].min)
- uV[i].min = opp->supplies[i].u_volt_min;
- if (opp->supplies[i].u_volt_max > uV[i].max)
- uV[i].max = opp->supplies[i].u_volt_max;
- }
- }
-
- mutex_unlock(&opp_table->lock);
-
- /*
- * The caller needs to ensure that opp_table (and hence the regulator)
- * isn't freed, while we are executing this routine.
- */
- for (i = 0; i < count; i++) {
- reg = opp_table->regulators[i];
- ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
- if (ret > 0)
- latency_ns += ret * 1000;
- }
-
- kfree(uV);
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return latency_ns;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
-
-/**
- * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
- * nanoseconds
- * @dev: device for which we do this operation
- *
- * Return: This function returns the max transition latency, in nanoseconds, to
- * switch from one OPP to other.
- */
-unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
-{
- return dev_pm_opp_get_max_volt_latency(dev) +
- dev_pm_opp_get_max_clock_latency(dev);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
-
-/**
- * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
- * @dev: device for which we do this operation
- *
- * Return: This function returns the frequency of the OPP marked as suspend_opp
- * if one is available, else returns 0;
- */
-unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
-{
- struct opp_table *opp_table;
- unsigned long freq = 0;
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return 0;
-
- if (opp_table->suspend_opp && opp_table->suspend_opp->available)
- freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return freq;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
-
-/**
- * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
- * @dev: device for which we do this operation
- *
- * Return: This function returns the number of available opps if there are any,
- * else returns 0 if none or the corresponding error value.
- */
-int dev_pm_opp_get_opp_count(struct device *dev)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp;
- int count = 0;
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- count = PTR_ERR(opp_table);
- dev_err(dev, "%s: OPP table not found (%d)\n",
- __func__, count);
- return count;
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available)
- count++;
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return count;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
-
-/**
- * dev_pm_opp_find_freq_exact() - search for an exact frequency
- * @dev: device for which we do this operation
- * @freq: frequency to search for
- * @available: true/false - match for available opp
- *
- * Return: Searches for exact match in the opp table and returns pointer to the
- * matching opp if found, else returns ERR_PTR in case of error and should
- * be handled using IS_ERR. Error return values can be:
- * EINVAL: for bad pointer
- * ERANGE: no match found for search
- * ENODEV: if device not found in list of registered devices
- *
- * Note: available is a modifier for the search. if available=true, then the
- * match is for exact matching frequency and is available in the stored OPP
- * table. if false, the match is for exact frequency which is not available.
- *
- * This provides a mechanism to enable an opp which is not available currently
- * or the opposite as well.
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq,
- bool available)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available == available &&
- temp_opp->rate == freq) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
-
-static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
- unsigned long *freq)
-{
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
-
- return opp;
-}
-
-/**
- * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
- * @dev: device for which we do this operation
- * @freq: Start frequency
- *
- * Search for the matching ceil *available* OPP from a starting freq
- * for a device.
- *
- * Return: matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR. Error return
- * values can be:
- * EINVAL: for bad pointer
- * ERANGE: no match found for search
- * ENODEV: if device not found in list of registered devices
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
- unsigned long *freq)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *opp;
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- opp = _find_freq_ceil(opp_table, freq);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
-
-/**
- * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
- * @dev: device for which we do this operation
- * @freq: Start frequency
- *
- * Search for the matching floor *available* OPP from a starting freq
- * for a device.
- *
- * Return: matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR. Error return
- * values can be:
- * EINVAL: for bad pointer
- * ERANGE: no match found for search
- * ENODEV: if device not found in list of registered devices
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
- unsigned long *freq)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- /* go to the next node, before choosing prev */
- if (temp_opp->rate > *freq)
- break;
- else
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *freq = opp->rate;
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
-
-static int _set_opp_voltage(struct device *dev, struct regulator *reg,
- struct dev_pm_opp_supply *supply)
-{
- int ret;
-
- /* Regulator not available for device */
- if (IS_ERR(reg)) {
- dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
- PTR_ERR(reg));
- return 0;
- }
-
- dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
- supply->u_volt_min, supply->u_volt, supply->u_volt_max);
-
- ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
- supply->u_volt, supply->u_volt_max);
- if (ret)
- dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
- __func__, supply->u_volt_min, supply->u_volt,
- supply->u_volt_max, ret);
-
- return ret;
-}
-
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long old_freq, unsigned long freq)
-{
- int ret;
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- }
-
- return ret;
-}
-
-static int _generic_set_opp_regulator(const struct opp_table *opp_table,
- struct device *dev,
- unsigned long old_freq,
- unsigned long freq,
- struct dev_pm_opp_supply *old_supply,
- struct dev_pm_opp_supply *new_supply)
-{
- struct regulator *reg = opp_table->regulators[0];
- int ret;
-
- /* This function only supports single regulator per device */
- if (WARN_ON(opp_table->regulator_count > 1)) {
- dev_err(dev, "multiple regulators are not supported\n");
- return -EINVAL;
- }
-
- /* Scaling up? Scale voltage before frequency */
- if (freq > old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
- if (ret)
- goto restore_voltage;
-
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
- if (ret)
- goto restore_freq;
- }
-
- return 0;
-
-restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- if (old_supply)
- _set_opp_voltage(dev, reg, old_supply);
-
- return ret;
-}
-
-/**
- * dev_pm_opp_set_rate() - Configure new OPP based on frequency
- * @dev: device for which we do this operation
- * @target_freq: frequency to achieve
- *
- * This configures the power-supplies and clock source to the levels specified
- * by the OPP corresponding to the target_freq.
- */
-int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
-{
- struct opp_table *opp_table;
- unsigned long freq, old_freq;
- struct dev_pm_opp *old_opp, *opp;
- struct clk *clk;
- int ret, size;
-
- if (unlikely(!target_freq)) {
- dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
- target_freq);
- return -EINVAL;
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp doesn't exist\n", __func__);
- return PTR_ERR(opp_table);
- }
-
- clk = opp_table->clk;
- if (IS_ERR(clk)) {
- dev_err(dev, "%s: No clock available for the device\n",
- __func__);
- ret = PTR_ERR(clk);
- goto put_opp_table;
- }
-
- freq = clk_round_rate(clk, target_freq);
- if ((long)freq <= 0)
- freq = target_freq;
-
- old_freq = clk_get_rate(clk);
-
- /* Return early if nothing to do */
- if (old_freq == freq) {
- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
- __func__, freq);
- ret = 0;
- goto put_opp_table;
- }
-
- old_opp = _find_freq_ceil(opp_table, &old_freq);
- if (IS_ERR(old_opp)) {
- dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
- __func__, old_freq, PTR_ERR(old_opp));
- }
-
- opp = _find_freq_ceil(opp_table, &freq);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
- __func__, freq, ret);
- goto put_old_opp;
- }
-
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
- old_freq, freq);
-
- /* Only frequency scaling */
- if (!opp_table->regulators) {
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
- } else if (!opp_table->set_opp) {
- ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
- IS_ERR(old_opp) ? NULL : old_opp->supplies,
- opp->supplies);
- } else {
- struct dev_pm_set_opp_data *data;
-
- data = opp_table->set_opp_data;
- data->regulators = opp_table->regulators;
- data->regulator_count = opp_table->regulator_count;
- data->clk = clk;
- data->dev = dev;
-
- data->old_opp.rate = old_freq;
- size = sizeof(*opp->supplies) * opp_table->regulator_count;
- if (IS_ERR(old_opp))
- memset(data->old_opp.supplies, 0, size);
- else
- memcpy(data->old_opp.supplies, old_opp->supplies, size);
-
- data->new_opp.rate = freq;
- memcpy(data->new_opp.supplies, opp->supplies, size);
-
- ret = opp_table->set_opp(data);
- }
-
- dev_pm_opp_put(opp);
-put_old_opp:
- if (!IS_ERR(old_opp))
- dev_pm_opp_put(old_opp);
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
-
-/* OPP-dev Helpers */
-static void _remove_opp_dev(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{
- opp_debug_unregister(opp_dev, opp_table);
- list_del(&opp_dev->node);
- kfree(opp_dev);
-}
-
-struct opp_device *_add_opp_dev(const struct device *dev,
- struct opp_table *opp_table)
-{
- struct opp_device *opp_dev;
- int ret;
-
- opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
- if (!opp_dev)
- return NULL;
-
- /* Initialize opp-dev */
- opp_dev->dev = dev;
- list_add(&opp_dev->node, &opp_table->dev_list);
-
- /* Create debugfs entries for the opp_table */
- ret = opp_debug_register(opp_dev, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
- __func__, ret);
-
- return opp_dev;
-}
-
-static struct opp_table *_allocate_opp_table(struct device *dev)
-{
- struct opp_table *opp_table;
- struct opp_device *opp_dev;
- int ret;
-
- /*
- * Allocate a new OPP table. In the infrequent case where a new
- * device is needed to be added, we pay this penalty.
- */
- opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
- if (!opp_table)
- return NULL;
-
- INIT_LIST_HEAD(&opp_table->dev_list);
-
- opp_dev = _add_opp_dev(dev, opp_table);
- if (!opp_dev) {
- kfree(opp_table);
- return NULL;
- }
-
- _of_init_opp_table(opp_table, dev);
-
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, NULL);
- if (IS_ERR(opp_table->clk)) {
- ret = PTR_ERR(opp_table->clk);
- if (ret != -EPROBE_DEFER)
- dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
- ret);
- }
-
- BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
- INIT_LIST_HEAD(&opp_table->opp_list);
- mutex_init(&opp_table->lock);
- kref_init(&opp_table->kref);
-
- /* Secure the device table modification */
- list_add(&opp_table->node, &opp_tables);
- return opp_table;
-}
-
-void _get_opp_table_kref(struct opp_table *opp_table)
-{
- kref_get(&opp_table->kref);
-}
-
-struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
-{
- struct opp_table *opp_table;
-
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
- opp_table = _find_opp_table_unlocked(dev);
- if (!IS_ERR(opp_table))
- goto unlock;
-
- opp_table = _allocate_opp_table(dev);
-
-unlock:
- mutex_unlock(&opp_table_lock);
-
- return opp_table;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
-
-static void _opp_table_kref_release(struct kref *kref)
-{
- struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
- struct opp_device *opp_dev;
-
- /* Release clk */
- if (!IS_ERR(opp_table->clk))
- clk_put(opp_table->clk);
-
- opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
- node);
-
- _remove_opp_dev(opp_dev, opp_table);
-
- /* dev_list must be empty now */
- WARN_ON(!list_empty(&opp_table->dev_list));
-
- mutex_destroy(&opp_table->lock);
- list_del(&opp_table->node);
- kfree(opp_table);
-
- mutex_unlock(&opp_table_lock);
-}
-
-void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
-{
- kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
- &opp_table_lock);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
-
-void _opp_free(struct dev_pm_opp *opp)
-{
- kfree(opp);
-}
-
-static void _opp_kref_release(struct kref *kref)
-{
- struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
- struct opp_table *opp_table = opp->opp_table;
-
- /*
- * Notify the changes in the availability of the operable
- * frequency/voltage list.
- */
- blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
- opp_debug_remove_one(opp);
- list_del(&opp->node);
- kfree(opp);
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-}
-
-static void dev_pm_opp_get(struct dev_pm_opp *opp)
-{
- kref_get(&opp->kref);
-}
-
-void dev_pm_opp_put(struct dev_pm_opp *opp)
-{
- kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put);
-
-/**
- * dev_pm_opp_remove() - Remove an OPP from OPP table
- * @dev: device for which we do this operation
- * @freq: OPP to remove with matching 'freq'
- *
- * This function removes an opp from the opp table.
- */
-void dev_pm_opp_remove(struct device *dev, unsigned long freq)
-{
- struct dev_pm_opp *opp;
- struct opp_table *opp_table;
- bool found = false;
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return;
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (opp->rate == freq) {
- found = true;
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
-
- if (found) {
- dev_pm_opp_put(opp);
- } else {
- dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
- __func__, freq);
- }
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
-
-struct dev_pm_opp *_opp_allocate(struct opp_table *table)
-{
- struct dev_pm_opp *opp;
- int count, supply_size;
-
- /* Allocate space for at least one supply */
- count = table->regulator_count ? table->regulator_count : 1;
- supply_size = sizeof(*opp->supplies) * count;
-
- /* allocate new OPP node and supplies structures */
- opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
- if (!opp)
- return NULL;
-
- /* Put the supplies at the end of the OPP structure as an empty array */
- opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
- INIT_LIST_HEAD(&opp->node);
-
- return opp;
-}
-
-static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
- struct opp_table *opp_table)
-{
- struct regulator *reg;
- int i;
-
- for (i = 0; i < opp_table->regulator_count; i++) {
- reg = opp_table->regulators[i];
-
- if (!regulator_is_supported_voltage(reg,
- opp->supplies[i].u_volt_min,
- opp->supplies[i].u_volt_max)) {
- pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
- __func__, opp->supplies[i].u_volt_min,
- opp->supplies[i].u_volt_max);
- return false;
- }
- }
-
- return true;
-}
-
-/*
- * Returns:
- * 0: On success. And appropriate error message for duplicate OPPs.
- * -EBUSY: For OPP with same freq/volt and is available. The callers of
- * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
- * sure we don't print error messages unnecessarily if different parts of
- * kernel try to initialize the OPP table.
- * -EEXIST: For OPP with same freq but different volt or is unavailable. This
- * should be considered an error by the callers of _opp_add().
- */
-int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct opp_table *opp_table)
-{
- struct dev_pm_opp *opp;
- struct list_head *head;
- int ret;
-
- /*
- * Insert new OPP in order of increasing frequency and discard if
- * already present.
- *
- * Need to use &opp_table->opp_list in the condition part of the 'for'
- * loop, don't replace it with head otherwise it will become an infinite
- * loop.
- */
- mutex_lock(&opp_table->lock);
- head = &opp_table->opp_list;
-
- list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (new_opp->rate > opp->rate) {
- head = &opp->node;
- continue;
- }
-
- if (new_opp->rate < opp->rate)
- break;
-
- /* Duplicate OPPs */
- dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->supplies[0].u_volt,
- opp->available, new_opp->rate,
- new_opp->supplies[0].u_volt, new_opp->available);
-
- /* Should we compare voltages for all regulators here ? */
- ret = opp->available &&
- new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
-
- mutex_unlock(&opp_table->lock);
- return ret;
- }
-
- list_add(&new_opp->node, head);
- mutex_unlock(&opp_table->lock);
-
- new_opp->opp_table = opp_table;
- kref_init(&new_opp->kref);
-
- /* Get a reference to the OPP table */
- _get_opp_table_kref(opp_table);
-
- ret = opp_debug_create_one(new_opp, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
- __func__, ret);
-
- if (!_opp_supported_by_regulators(new_opp, opp_table)) {
- new_opp->available = false;
- dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
- __func__, new_opp->rate);
- }
-
- return 0;
-}
-
-/**
- * _opp_add_v1() - Allocate a OPP based on v1 bindings.
- * @opp_table: OPP table
- * @dev: device for which we do this operation
- * @freq: Frequency in Hz for this OPP
- * @u_volt: Voltage in uVolts for this OPP
- * @dynamic: Dynamically added OPPs.
- *
- * This function adds an opp definition to the opp table and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
- *
- * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
- * and freed by dev_pm_opp_of_remove_table.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- */
-int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
- unsigned long freq, long u_volt, bool dynamic)
-{
- struct dev_pm_opp *new_opp;
- unsigned long tol;
- int ret;
-
- new_opp = _opp_allocate(opp_table);
- if (!new_opp)
- return -ENOMEM;
-
- /* populate the opp table */
- new_opp->rate = freq;
- tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
- new_opp->supplies[0].u_volt = u_volt;
- new_opp->supplies[0].u_volt_min = u_volt - tol;
- new_opp->supplies[0].u_volt_max = u_volt + tol;
- new_opp->available = true;
- new_opp->dynamic = dynamic;
-
- ret = _opp_add(dev, new_opp, opp_table);
- if (ret) {
- /* Don't return error for duplicate OPPs */
- if (ret == -EBUSY)
- ret = 0;
- goto free_opp;
- }
-
- /*
- * Notify the changes in the availability of the operable
- * frequency/voltage list.
- */
- blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
- return 0;
-
-free_opp:
- _opp_free(new_opp);
-
- return ret;
-}
-
-/**
- * dev_pm_opp_set_supported_hw() - Set supported platforms
- * @dev: Device for which supported-hw has to be set.
- * @versions: Array of hierarchy of versions to match.
- * @count: Number of elements in the array.
- *
- * This is required only for the V2 bindings, and it enables a platform to
- * specify the hierarchy of versions it supports. The OPP layer will then only
- * enable the OPPs that are available for those versions, based on their
- * 'opp-supported-hw' property.
- */
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions, unsigned int count)
-{
- struct opp_table *opp_table;
- int ret;
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
- /* Do we already have a version hierarchy associated with opp_table? */
- if (opp_table->supported_hw) {
- dev_err(dev, "%s: Already have supported hardware list\n",
- __func__);
- ret = -EBUSY;
- goto err;
- }
-
- opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
- GFP_KERNEL);
- if (!opp_table->supported_hw) {
- ret = -ENOMEM;
- goto err;
- }
-
- opp_table->supported_hw_count = count;
-
- return opp_table;
-
-err:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
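For illustration only, a minimal probe-time sketch of how a platform driver could use this helper before populating the table from DT; the foo_ naming and the version value 0x2 are hypothetical:

static int foo_init_opps(struct device *dev)
{
	/* Hypothetical: this SoC revision is encoded as BIT(1) */
	const u32 versions[] = { 0x2 };
	struct opp_table *opp_table;
	int ret;

	/* Must run before any OPPs are added for this device */
	opp_table = dev_pm_opp_set_supported_hw(dev, versions,
						ARRAY_SIZE(versions));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_supported_hw(opp_table);

	return ret;
}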
-
-/**
- * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
- *
- * This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
- * will not be freed.
- */
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
-{
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
- if (!opp_table->supported_hw) {
- pr_err("%s: Doesn't have supported hardware list\n",
- __func__);
- return;
- }
-
- kfree(opp_table->supported_hw);
- opp_table->supported_hw = NULL;
- opp_table->supported_hw_count = 0;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
-
-/**
- * dev_pm_opp_set_prop_name() - Set prop-extn name
- * @dev: Device for which the prop-name has to be set.
- * @name: name to postfix to properties.
- *
- * This is required only for the V2 bindings, and it enables a platform to
- * specify the extension to be used for certain property names. The properties
- * to which the extension applies are opp-microvolt and opp-microamp. The OPP
- * core will then postfix the property name with -<name> while looking them up.
- */
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
-{
- struct opp_table *opp_table;
- int ret;
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
- /* Do we already have a prop-name associated with opp_table? */
- if (opp_table->prop_name) {
- dev_err(dev, "%s: Already have prop-name %s\n", __func__,
- opp_table->prop_name);
- ret = -EBUSY;
- goto err;
- }
-
- opp_table->prop_name = kstrdup(name, GFP_KERNEL);
- if (!opp_table->prop_name) {
- ret = -ENOMEM;
- goto err;
- }
-
- return opp_table;
-
-err:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
-
-/**
- * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
- *
- * This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
- * will not be freed.
- */
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
-{
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
- if (!opp_table->prop_name) {
- pr_err("%s: Doesn't have a prop-name\n", __func__);
- return;
- }
-
- kfree(opp_table->prop_name);
- opp_table->prop_name = NULL;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
-
-static int _allocate_set_opp_data(struct opp_table *opp_table)
-{
- struct dev_pm_set_opp_data *data;
- int len, count = opp_table->regulator_count;
-
- if (WARN_ON(!count))
- return -EINVAL;
-
- /* space for set_opp_data */
- len = sizeof(*data);
-
- /* space for old_opp.supplies and new_opp.supplies */
- len += 2 * sizeof(struct dev_pm_opp_supply) * count;
-
- data = kzalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->old_opp.supplies = (void *)(data + 1);
- data->new_opp.supplies = data->old_opp.supplies + count;
-
- opp_table->set_opp_data = data;
-
- return 0;
-}
-
-static void _free_set_opp_data(struct opp_table *opp_table)
-{
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
-}
-
-/**
- * dev_pm_opp_set_regulators() - Set regulator names for the device
- * @dev: Device for which regulator name is being set.
- * @names: Array of pointers to the names of the regulator.
- * @count: Number of regulators.
- *
- * In order to support OPP switching, the OPP layer needs to know the names of
- * the device's regulators, as the core is also required to switch voltages
- * along with frequencies.
- *
- * This must be called before any OPPs are initialized for the device.
- */
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
-{
- struct opp_table *opp_table;
- struct regulator *reg;
- int ret, i;
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
-
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
-
- /* Already have regulators set */
- if (opp_table->regulators) {
- ret = -EBUSY;
- goto err;
- }
-
- opp_table->regulators = kmalloc_array(count,
- sizeof(*opp_table->regulators),
- GFP_KERNEL);
- if (!opp_table->regulators) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < count; i++) {
- reg = regulator_get_optional(dev, names[i]);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%s: no regulator (%s) found: %d\n",
- __func__, names[i], ret);
- goto free_regulators;
- }
-
- opp_table->regulators[i] = reg;
- }
-
- opp_table->regulator_count = count;
-
- /* Allocate block only once to pass to set_opp() routines */
- ret = _allocate_set_opp_data(opp_table);
- if (ret)
- goto free_regulators;
-
- return opp_table;
-
-free_regulators:
- while (i != 0)
- regulator_put(opp_table->regulators[--i]);
-
- kfree(opp_table->regulators);
- opp_table->regulators = NULL;
- opp_table->regulator_count = 0;
-err:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
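A hedged usage sketch; the regulator names "vdd-core" and "vdd-mem" and the foo_ prefix are purely illustrative:

static const char * const foo_reg_names[] = { "vdd-core", "vdd-mem" };

static int foo_setup_regulators(struct device *dev)
{
	struct opp_table *opp_table;

	/* Must be called before any OPPs are added for the device */
	opp_table = dev_pm_opp_set_regulators(dev, foo_reg_names,
					      ARRAY_SIZE(foo_reg_names));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* Balance with dev_pm_opp_put_regulators(opp_table) on teardown */
	return 0;
}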
-
-/**
- * dev_pm_opp_put_regulators() - Releases resources blocked for regulators
- * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
- */
-void dev_pm_opp_put_regulators(struct opp_table *opp_table)
-{
- int i;
-
- if (!opp_table->regulators) {
- pr_err("%s: Doesn't have regulators set\n", __func__);
- return;
- }
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
- for (i = opp_table->regulator_count - 1; i >= 0; i--)
- regulator_put(opp_table->regulators[i]);
-
- _free_set_opp_data(opp_table);
-
- kfree(opp_table->regulators);
- opp_table->regulators = NULL;
- opp_table->regulator_count = 0;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
-
-/**
- * dev_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * In order to support OPP switching, the OPP layer needs to get a pointer to
- * the clock for the device. Simple cases work fine without using this routine
- * (i.e. with the connection-id passed as NULL), but for a device with multiple
- * clocks available, the OPP core needs to know the exact name of the clk to use.
- *
- * This must be called before any OPPs are initialized for the device.
- */
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
-{
- struct opp_table *opp_table;
- int ret;
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
-
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
-
- /* Already have default clk set, free it */
- if (!IS_ERR(opp_table->clk))
- clk_put(opp_table->clk);
-
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, name);
- if (IS_ERR(opp_table->clk)) {
- ret = PTR_ERR(opp_table->clk);
- if (ret != -EPROBE_DEFER) {
- dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
- ret);
- }
- goto err;
- }
-
- return opp_table;
-
-err:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
-
-/**
- * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
- * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
- */
-void dev_pm_opp_put_clkname(struct opp_table *opp_table)
-{
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
- clk_put(opp_table->clk);
- opp_table->clk = ERR_PTR(-EINVAL);
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
-
-/**
- * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
- * @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
- *
- * This is useful for supporting complex platforms (like platforms with
- * multiple regulators per device), where the custom helper is used instead of
- * the generic OPP set-rate helper.
- *
- * This must be called before any OPPs are initialized for the device.
- */
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
-{
- struct opp_table *opp_table;
- int ret;
-
- if (!set_opp)
- return ERR_PTR(-EINVAL);
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
-
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
-
- /* Already have custom set_opp helper */
- if (WARN_ON(opp_table->set_opp)) {
- ret = -EBUSY;
- goto err;
- }
-
- opp_table->set_opp = set_opp;
-
- return opp_table;
-
-err:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
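A simplified sketch of what such a custom helper might look like, assuming a single supply and ignoring most error unwinding; the ordering (raise the voltage before the clock when scaling up, lower it afterwards when scaling down) is the usual DVFS convention:

static int foo_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *new = &data->new_opp.supplies[0];
	int ret;

	/* Scaling up: raise the voltage before the frequency */
	if (data->new_opp.rate > data->old_opp.rate) {
		ret = regulator_set_voltage_triplet(data->regulators[0],
						    new->u_volt_min,
						    new->u_volt,
						    new->u_volt_max);
		if (ret)
			return ret;
	}

	ret = clk_set_rate(data->clk, data->new_opp.rate);
	if (ret)
		return ret;

	/* Scaling down: lower the voltage after the frequency */
	if (data->new_opp.rate < data->old_opp.rate)
		regulator_set_voltage_triplet(data->regulators[0],
					      new->u_volt_min, new->u_volt,
					      new->u_volt_max);

	return 0;
}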
-
-/**
- * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
- * set_opp helper
- * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
- *
- * Release resources blocked for platform specific set_opp helper.
- */
-void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table)
-{
- if (!opp_table->set_opp) {
- pr_err("%s: Doesn't have custom set_opp helper set\n",
- __func__);
- return;
- }
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
- opp_table->set_opp = NULL;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
-
-/**
- * dev_pm_opp_add() - Add an OPP table entry from a table definition
- * @dev: device for which we do this operation
- * @freq: Frequency in Hz for this OPP
- * @u_volt: Voltage in uVolts for this OPP
- *
- * This function adds an opp definition to the opp table and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- */
-int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
-{
- struct opp_table *opp_table;
- int ret;
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
-
- ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
-
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_add);
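A short sketch of dynamic OPP registration; the frequencies and microvolt values below are made up (they could, for example, come from firmware):

	ret = dev_pm_opp_add(dev, 499200000, 950000);
	if (!ret)
		ret = dev_pm_opp_add(dev, 998400000, 1075000);
	if (!ret)
		ret = dev_pm_opp_add(dev, 1401600000, 1200000);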
-
-/**
- * _opp_set_availability() - helper to set the availability of an opp
- * @dev: device for which we do this operation
- * @freq: OPP frequency to modify availability
- * @availability_req: availability status requested for this opp
- *
- * Set the availability of an OPP, opp_{enable,disable} share a common logic
- * which is isolated here.
- *
- * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modification was done OR modification was
- * successful.
- */
-static int _opp_set_availability(struct device *dev, unsigned long freq,
- bool availability_req)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
- int r = 0;
-
- /* Find the opp_table */
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- r = PTR_ERR(opp_table);
- dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
- return r;
- }
-
- mutex_lock(&opp_table->lock);
-
- /* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
- opp = tmp_opp;
- break;
- }
- }
-
- if (IS_ERR(opp)) {
- r = PTR_ERR(opp);
- goto unlock;
- }
-
- /* Is update really needed? */
- if (opp->available == availability_req)
- goto unlock;
-
- opp->available = availability_req;
-
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
-
- /* Notify the change of the OPP availability */
- if (availability_req)
- blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
- opp);
- else
- blocking_notifier_call_chain(&opp_table->head,
- OPP_EVENT_DISABLE, opp);
-
- dev_pm_opp_put(opp);
- goto put_table;
-
-unlock:
- mutex_unlock(&opp_table->lock);
-put_table:
- dev_pm_opp_put_opp_table(opp_table);
- return r;
-}
-
-/**
- * dev_pm_opp_enable() - Enable a specific OPP
- * @dev: device for which we do this operation
- * @freq: OPP frequency to enable
- *
- * Enables a provided opp. If the operation is valid, this returns 0, else the
- * corresponding error value. It is meant to be used by users to make an OPP
- * available again after it was temporarily made unavailable with
- * dev_pm_opp_disable.
- *
- * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modification was done OR modification was
- * successful.
- */
-int dev_pm_opp_enable(struct device *dev, unsigned long freq)
-{
- return _opp_set_availability(dev, freq, true);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
-
-/**
- * dev_pm_opp_disable() - Disable a specific OPP
- * @dev: device for which we do this operation
- * @freq: OPP frequency to disable
- *
- * Disables a provided opp. If the operation is valid, this returns
- * 0, else the corresponding error value. It is meant to be a temporary
- * control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to dev_pm_opp_enable).
- *
- * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modification was done OR modification was
- * successful.
- */
-int dev_pm_opp_disable(struct device *dev, unsigned long freq)
-{
- return _opp_set_availability(dev, freq, false);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
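For example (illustrative frequency), a driver reacting to a thermal event might do:

	/* Drop the 1.4 GHz OPP while the device runs hot */
	dev_pm_opp_disable(dev, 1401600000);
	/* ... and restore it once temperatures recover */
	dev_pm_opp_enable(dev, 1401600000);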
-
-/**
- * dev_pm_opp_register_notifier() - Register OPP notifier for the device
- * @dev: Device for which notifier needs to be registered
- * @nb: Notifier block to be registered
- *
- * Return: 0 on success or a negative error value.
- */
-int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
-{
- struct opp_table *opp_table;
- int ret;
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- ret = blocking_notifier_chain_register(&opp_table->head, nb);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
-}
-EXPORT_SYMBOL(dev_pm_opp_register_notifier);
-
-/**
- * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
- * @dev: Device for which notifier needs to be unregistered
- * @nb: Notifier block to be unregistered
- *
- * Return: 0 on success or a negative error value.
- */
-int dev_pm_opp_unregister_notifier(struct device *dev,
- struct notifier_block *nb)
-{
- struct opp_table *opp_table;
- int ret;
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
-}
-EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
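A rough sketch of a notifier user; the callback receives the OPP pointer as the notifier data and one of the OPP_EVENT_* values as the event:

static int foo_opp_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct dev_pm_opp *opp = data;

	if (event == OPP_EVENT_ADD || event == OPP_EVENT_ENABLE)
		pr_debug("OPP at %lu Hz is now usable\n",
			 dev_pm_opp_get_freq(opp));

	return NOTIFY_OK;
}

static struct notifier_block foo_opp_nb = {
	.notifier_call = foo_opp_notify,
};

/* ... dev_pm_opp_register_notifier(dev, &foo_opp_nb); ... */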
-
-/*
- * Free OPPs created from static entries present in DT and, if remove_all is
- * set, the dynamically added entries as well.
- */
-void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
- bool remove_all)
-{
- struct dev_pm_opp *opp, *tmp;
-
- /* Find if opp_table manages a single device */
- if (list_is_singular(&opp_table->dev_list)) {
- /* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (remove_all || !opp->dynamic)
- dev_pm_opp_put(opp);
- }
- } else {
- _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
- }
-}
-
-void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
-{
- struct opp_table *opp_table;
-
- /* Check for existing table for 'dev' */
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int error = PTR_ERR(opp_table);
-
- if (error != -ENODEV)
- WARN(1, "%s: opp_table: %d\n",
- IS_ERR_OR_NULL(dev) ?
- "Invalid device" : dev_name(dev),
- error);
- return;
- }
-
- _dev_pm_opp_remove_table(opp_table, dev, remove_all);
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-
-/**
- * dev_pm_opp_remove_table() - Free all OPPs associated with the device
- * @dev: device pointer used to lookup OPP table.
- *
- * Free both OPPs created using static entries present in DT and the
- * dynamically added entries.
- */
-void dev_pm_opp_remove_table(struct device *dev)
-{
- _dev_pm_opp_find_and_remove_table(dev, true);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
deleted file mode 100644
index 2d87bc1adf38..000000000000
--- a/drivers/base/power/opp/cpu.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Generic OPP helper interface for CPU device
- *
- * Copyright (C) 2009-2014 Texas Instruments Incorporated.
- * Nishanth Menon
- * Romit Dasgupta
- * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-#include "opp.h"
-
-#ifdef CONFIG_CPU_FREQ
-
-/**
- * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
- * @dev: device for which we do this operation
- * @table: Cpufreq table returned back to caller
- *
- * Generate a cpufreq table for a provided device - this assumes that the
- * opp table is already initialized and ready for usage.
- *
- * This function allocates required memory for the cpufreq table. It is
- * expected that the caller does the required maintenance such as freeing
- * the table as required.
- *
- * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
- * if no memory available for the operation (table is not populated), returns 0
- * if successful and table is populated.
- *
- * WARNING: It is important for the callers to ensure refreshing their copy of
- * the table if any of the mentioned functions have been invoked in the interim.
- */
-int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- struct dev_pm_opp *opp;
- struct cpufreq_frequency_table *freq_table = NULL;
- int i, max_opps, ret = 0;
- unsigned long rate;
-
- max_opps = dev_pm_opp_get_opp_count(dev);
- if (max_opps <= 0)
- return max_opps ? max_opps : -ENODATA;
-
- freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
- if (!freq_table)
- return -ENOMEM;
-
- for (i = 0, rate = 0; i < max_opps; i++, rate++) {
- /* find next rate */
- opp = dev_pm_opp_find_freq_ceil(dev, &rate);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- goto out;
- }
- freq_table[i].driver_data = i;
- freq_table[i].frequency = rate / 1000;
-
- /* Is this a boost/turbo OPP? */
- if (dev_pm_opp_is_turbo(opp))
- freq_table[i].flags = CPUFREQ_BOOST_FREQ;
-
- dev_pm_opp_put(opp);
- }
-
- freq_table[i].driver_data = i;
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-
- *table = &freq_table[0];
-
-out:
- if (ret)
- kfree(freq_table);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
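A rough sketch of the typical consumer, a cpufreq driver's ->init() callback; names are hypothetical and how the table is handed to cpufreq (direct assignment vs. a validate-and-show helper) varies between drivers:

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct cpufreq_frequency_table *freq_table;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret)
		return ret;

	policy->freq_table = freq_table;
	return 0;
}

/* The matching ->exit() would call dev_pm_opp_free_cpufreq_table(). */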
-
-/**
- * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
- * @dev: device for which we do this operation
- * @table: table to free
- *
- * Free up the table allocated by dev_pm_opp_init_cpufreq_table
- */
-void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- if (!table)
- return;
-
- kfree(*table);
- *table = NULL;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
-#endif /* CONFIG_CPU_FREQ */
-
-void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
-{
- struct device *cpu_dev;
- int cpu;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- if (of)
- dev_pm_opp_of_remove_table(cpu_dev);
- else
- dev_pm_opp_remove_table(cpu_dev);
- }
-}
-
-/**
- * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
- * @cpumask: cpumask for which OPP table needs to be removed
- *
- * This removes the OPP tables for CPUs present in the @cpumask.
- * This should be used to remove all the OPP entries associated with
- * the CPUs in @cpumask.
- */
-void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
-{
- _dev_pm_opp_cpumask_remove_table(cpumask, false);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
-
-/**
- * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
- * @cpu_dev: CPU device for which we do this operation
- * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
- *
- * This marks OPP table of the @cpu_dev as shared by the CPUs present in
- * @cpumask.
- *
- * Returns -ENODEV if OPP table isn't already present.
- */
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- struct opp_device *opp_dev;
- struct opp_table *opp_table;
- struct device *dev;
- int cpu, ret = 0;
-
- opp_table = _find_opp_table(cpu_dev);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- for_each_cpu(cpu, cpumask) {
- if (cpu == cpu_dev->id)
- continue;
-
- dev = get_cpu_device(cpu);
- if (!dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- continue;
- }
-
- opp_dev = _add_opp_dev(dev, opp_table);
- if (!opp_dev) {
- dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
- __func__, cpu);
- continue;
- }
-
- /* Mark opp-table as multiple CPUs are sharing it now */
- opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
- }
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
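Illustrative usage from a cpufreq driver, marking the CPUs of a policy as sharing cpu_dev's table (cpu_dev and policy are assumed to exist in the caller):

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret)
		dev_warn(cpu_dev, "failed to mark OPPs as shared: %d\n", ret);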
-
-/**
- * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
- * @cpu_dev: CPU device for which we do this operation
- * @cpumask: cpumask to update with information of sharing CPUs
- *
- * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
- *
- * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
- * table's status is access-unknown.
- */
-int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
-{
- struct opp_device *opp_dev;
- struct opp_table *opp_table;
- int ret = 0;
-
- opp_table = _find_opp_table(cpu_dev);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
- ret = -EINVAL;
- goto put_opp_table;
- }
-
- cpumask_clear(cpumask);
-
- if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
- list_for_each_entry(opp_dev, &opp_table->dev_list, node)
- cpumask_set_cpu(opp_dev->dev->id, cpumask);
- } else {
- cpumask_set_cpu(cpu_dev->id, cpumask);
- }
-
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
deleted file mode 100644
index 81cf120fcf43..000000000000
--- a/drivers/base/power/opp/debugfs.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Generic OPP debugfs interface
- *
- * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/limits.h>
-#include <linux/slab.h>
-
-#include "opp.h"
-
-static struct dentry *rootdir;
-
-static void opp_set_dev_name(const struct device *dev, char *name)
-{
- if (dev->parent)
- snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent),
- dev_name(dev));
- else
- snprintf(name, NAME_MAX, "%s", dev_name(dev));
-}
-
-void opp_debug_remove_one(struct dev_pm_opp *opp)
-{
- debugfs_remove_recursive(opp->dentry);
-}
-
-static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
- struct opp_table *opp_table,
- struct dentry *pdentry)
-{
- struct dentry *d;
- int i;
- char *name;
-
- for (i = 0; i < opp_table->regulator_count; i++) {
- name = kasprintf(GFP_KERNEL, "supply-%d", i);
-
- /* Create per-opp directory */
- d = debugfs_create_dir(name, pdentry);
-
- kfree(name);
-
- if (!d)
- return false;
-
- if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
- &opp->supplies[i].u_volt))
- return false;
-
- if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
- &opp->supplies[i].u_volt_min))
- return false;
-
- if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
- &opp->supplies[i].u_volt_max))
- return false;
-
- if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
- &opp->supplies[i].u_amp))
- return false;
- }
-
- return true;
-}
-
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
-{
- struct dentry *pdentry = opp_table->dentry;
- struct dentry *d;
- char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
-
- /* Rate is unique to each OPP, use it to give opp-name */
- snprintf(name, sizeof(name), "opp:%lu", opp->rate);
-
- /* Create per-opp directory */
- d = debugfs_create_dir(name, pdentry);
- if (!d)
- return -ENOMEM;
-
- if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
- return -ENOMEM;
-
- if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
- return -ENOMEM;
-
- if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
- return -ENOMEM;
-
- if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
- return -ENOMEM;
-
- if (!opp_debug_create_supplies(opp, opp_table, d))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
- &opp->clock_latency_ns))
- return -ENOMEM;
-
- opp->dentry = d;
- return 0;
-}
-
-static int opp_list_debug_create_dir(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{
- const struct device *dev = opp_dev->dev;
- struct dentry *d;
-
- opp_set_dev_name(dev, opp_table->dentry_name);
-
- /* Create device specific directory */
- d = debugfs_create_dir(opp_table->dentry_name, rootdir);
- if (!d) {
- dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
- return -ENOMEM;
- }
-
- opp_dev->dentry = d;
- opp_table->dentry = d;
-
- return 0;
-}
-
-static int opp_list_debug_create_link(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{
- const struct device *dev = opp_dev->dev;
- char name[NAME_MAX];
- struct dentry *d;
-
- opp_set_dev_name(opp_dev->dev, name);
-
- /* Create device specific directory link */
- d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
- if (!d) {
- dev_err(dev, "%s: Failed to create link\n", __func__);
- return -ENOMEM;
- }
-
- opp_dev->dentry = d;
-
- return 0;
-}
-
-/**
- * opp_debug_register - add a device opp node to the debugfs 'opp' directory
- * @opp_dev: opp-dev pointer for device
- * @opp_table: the device-opp being added
- *
- * Dynamically adds device specific directory in debugfs 'opp' directory. If the
- * device-opp is shared with other devices, then links will be created for all
- * devices except the first.
- *
- * Return: 0 on success, otherwise negative error.
- */
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
-{
- if (!rootdir) {
- pr_debug("%s: Uninitialized rootdir\n", __func__);
- return -EINVAL;
- }
-
- if (opp_table->dentry)
- return opp_list_debug_create_link(opp_dev, opp_table);
-
- return opp_list_debug_create_dir(opp_dev, opp_table);
-}
-
-static void opp_migrate_dentry(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{
- struct opp_device *new_dev;
- const struct device *dev;
- struct dentry *dentry;
-
- /* Look for next opp-dev */
- list_for_each_entry(new_dev, &opp_table->dev_list, node)
- if (new_dev != opp_dev)
- break;
-
- /* new_dev is guaranteed to be valid here */
- dev = new_dev->dev;
- debugfs_remove_recursive(new_dev->dentry);
-
- opp_set_dev_name(dev, opp_table->dentry_name);
-
- dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
- opp_table->dentry_name);
- if (!dentry) {
- dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
- __func__, dev_name(opp_dev->dev), dev_name(dev));
- return;
- }
-
- new_dev->dentry = dentry;
- opp_table->dentry = dentry;
-}
-
-/**
- * opp_debug_unregister - remove a device opp node from debugfs opp directory
- * @opp_dev: opp-dev pointer for device
- * @opp_table: the device-opp being removed
- *
- * Dynamically removes device specific directory from debugfs 'opp' directory.
- */
-void opp_debug_unregister(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{
- if (opp_dev->dentry == opp_table->dentry) {
- /* Move the real dentry object under another device */
- if (!list_is_singular(&opp_table->dev_list)) {
- opp_migrate_dentry(opp_dev, opp_table);
- goto out;
- }
- opp_table->dentry = NULL;
- }
-
- debugfs_remove_recursive(opp_dev->dentry);
-
-out:
- opp_dev->dentry = NULL;
-}
-
-static int __init opp_debug_init(void)
-{
- /* Create /sys/kernel/debug/opp directory */
- rootdir = debugfs_create_dir("opp", NULL);
- if (!rootdir) {
- pr_err("%s: Failed to create root directory\n", __func__);
- return -ENOMEM;
- }
-
- return 0;
-}
-core_initcall(opp_debug_init);
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
deleted file mode 100644
index 0b718886479b..000000000000
--- a/drivers/base/power/opp/of.c
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * Generic OPP OF helpers
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- * Nishanth Menon
- * Romit Dasgupta
- * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/cpu.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "opp.h"
-
-static struct opp_table *_managed_opp(const struct device_node *np)
-{
- struct opp_table *opp_table, *managed_table = NULL;
-
- mutex_lock(&opp_table_lock);
-
- list_for_each_entry(opp_table, &opp_tables, node) {
- if (opp_table->np == np) {
- /*
- * Multiple devices can point to the same OPP table and
- * so will have same node-pointer, np.
- *
- * But the OPPs will be considered as shared only if the
- * OPP table contains a "opp-shared" property.
- */
- if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
- _get_opp_table_kref(opp_table);
- managed_table = opp_table;
- }
-
- break;
- }
- }
-
- mutex_unlock(&opp_table_lock);
-
- return managed_table;
-}
-
-void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
-{
- struct device_node *np;
-
- /*
- * Only required for backward compatibility with v1 bindings, but it isn't
- * harmful for other cases either, so we do it unconditionally.
- */
- np = of_node_get(dev->of_node);
- if (np) {
- u32 val;
-
- if (!of_property_read_u32(np, "clock-latency", &val))
- opp_table->clock_latency_ns_max = val;
- of_property_read_u32(np, "voltage-tolerance",
- &opp_table->voltage_tolerance_v1);
- of_node_put(np);
- }
-}
-
-static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
- struct device_node *np)
-{
- unsigned int count = opp_table->supported_hw_count;
- u32 version;
- int ret;
-
- if (!opp_table->supported_hw) {
- /*
- * If the platform hasn't set supported_hw but an OPP node
- * carries an opp-supported-hw value, then the OPP should not
- * be enabled, as there is no way to tell whether the hardware
- * supports it.
- */
- if (of_find_property(np, "opp-supported-hw", NULL))
- return false;
- else
- return true;
- }
-
- while (count--) {
- ret = of_property_read_u32_index(np, "opp-supported-hw", count,
- &version);
- if (ret) {
- dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
- __func__, count, ret);
- return false;
- }
-
- /* Both of these are bitwise masks of the versions */
- if (!(version & opp_table->supported_hw[count]))
- return false;
- }
-
- return true;
-}
-
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
- struct opp_table *opp_table)
-{
- u32 *microvolt, *microamp = NULL;
- int supplies, vcount, icount, ret, i, j;
- struct property *prop = NULL;
- char name[NAME_MAX];
-
- supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
-
- /* Search for "opp-microvolt-<name>" */
- if (opp_table->prop_name) {
- snprintf(name, sizeof(name), "opp-microvolt-%s",
- opp_table->prop_name);
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (!prop) {
- /* Search for "opp-microvolt" */
- sprintf(name, "opp-microvolt");
- prop = of_find_property(opp->np, name, NULL);
-
- /* Missing property isn't a problem, but an invalid entry is */
- if (!prop) {
- if (!opp_table->regulator_count)
- return 0;
-
- dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
- __func__);
- return -EINVAL;
- }
- }
-
- vcount = of_property_count_u32_elems(opp->np, name);
- if (vcount < 0) {
- dev_err(dev, "%s: Invalid %s property (%d)\n",
- __func__, name, vcount);
- return vcount;
- }
-
- /* There can be one or three elements per supply */
- if (vcount != supplies && vcount != supplies * 3) {
- dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
- __func__, name, vcount, supplies);
- return -EINVAL;
- }
-
- microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
- if (!microvolt)
- return -ENOMEM;
-
- ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
- if (ret) {
- dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
- ret = -EINVAL;
- goto free_microvolt;
- }
-
- /* Search for "opp-microamp-<name>" */
- prop = NULL;
- if (opp_table->prop_name) {
- snprintf(name, sizeof(name), "opp-microamp-%s",
- opp_table->prop_name);
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (!prop) {
- /* Search for "opp-microamp" */
- sprintf(name, "opp-microamp");
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (prop) {
- icount = of_property_count_u32_elems(opp->np, name);
- if (icount < 0) {
- dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
- name, icount);
- ret = icount;
- goto free_microvolt;
- }
-
- if (icount != supplies) {
- dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
- __func__, name, icount, supplies);
- ret = -EINVAL;
- goto free_microvolt;
- }
-
- microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
- if (!microamp) {
- ret = -EINVAL;
- goto free_microvolt;
- }
-
- ret = of_property_read_u32_array(opp->np, name, microamp,
- icount);
- if (ret) {
- dev_err(dev, "%s: error parsing %s: %d\n", __func__,
- name, ret);
- ret = -EINVAL;
- goto free_microamp;
- }
- }
-
- for (i = 0, j = 0; i < supplies; i++) {
- opp->supplies[i].u_volt = microvolt[j++];
-
- if (vcount == supplies) {
- opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
- opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
- } else {
- opp->supplies[i].u_volt_min = microvolt[j++];
- opp->supplies[i].u_volt_max = microvolt[j++];
- }
-
- if (microamp)
- opp->supplies[i].u_amp = microamp[i];
- }
-
-free_microamp:
- kfree(microamp);
-free_microvolt:
- kfree(microvolt);
-
- return ret;
-}
-
-/**
- * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
- * entries
- * @dev: device pointer used to lookup OPP table.
- *
- * Free OPPs created using static entries present in DT.
- */
-void dev_pm_opp_of_remove_table(struct device *dev)
-{
- _dev_pm_opp_find_and_remove_table(dev, false);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-
-/* Returns opp descriptor node for a device node, caller must
- * do of_node_put() */
-static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np)
-{
- /*
- * There should be only ONE phandle present in "operating-points-v2"
- * property.
- */
-
- return of_parse_phandle(np, "operating-points-v2", 0);
-}
-
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
-{
- return _opp_of_get_opp_desc_node(dev->of_node);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
-
-/**
- * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
- * @opp_table: OPP table
- * @dev: device for which we do this operation
- * @np: device node
- *
- * This function adds an opp definition to the opp table and returns status. The
- * opp can be controlled using dev_pm_opp_enable/disable functions and may be
- * removed by dev_pm_opp_remove.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -EINVAL Failed parsing the OPP node
- */
-static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
- struct device_node *np)
-{
- struct dev_pm_opp *new_opp;
- u64 rate;
- u32 val;
- int ret;
-
- new_opp = _opp_allocate(opp_table);
- if (!new_opp)
- return -ENOMEM;
-
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (ret < 0) {
- dev_err(dev, "%s: opp-hz not found\n", __func__);
- goto free_opp;
- }
-
- /* Check if the OPP supports hardware's hierarchy of versions or not */
- if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
- goto free_opp;
- }
-
- /*
- * Rate is defined as an unsigned long in the clk API, so cast it
- * explicitly to that type here. This must be fixed once rates are
- * guaranteed to be 64 bit in the clk API.
- */
- new_opp->rate = (unsigned long)rate;
- new_opp->turbo = of_property_read_bool(np, "turbo-mode");
-
- new_opp->np = np;
- new_opp->dynamic = false;
- new_opp->available = true;
-
- if (!of_property_read_u32(np, "clock-latency-ns", &val))
- new_opp->clock_latency_ns = val;
-
- ret = opp_parse_supplies(new_opp, dev, opp_table);
- if (ret)
- goto free_opp;
-
- ret = _opp_add(dev, new_opp, opp_table);
- if (ret) {
- /* Don't return error for duplicate OPPs */
- if (ret == -EBUSY)
- ret = 0;
- goto free_opp;
- }
-
- /* OPP to select on device suspend */
- if (of_property_read_bool(np, "opp-suspend")) {
- if (opp_table->suspend_opp) {
- dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, opp_table->suspend_opp->rate,
- new_opp->rate);
- } else {
- new_opp->suspend = true;
- opp_table->suspend_opp = new_opp;
- }
- }
-
- if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
- opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
-
- pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
- __func__, new_opp->turbo, new_opp->rate,
- new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
- new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
-
- /*
- * Notify the changes in the availability of the operable
- * frequency/voltage list.
- */
- blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
- return 0;
-
-free_opp:
- _opp_free(new_opp);
-
- return ret;
-}
-
-/* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
-{
- struct device_node *np;
- struct opp_table *opp_table;
- int ret = 0, count = 0;
-
- opp_table = _managed_opp(opp_np);
- if (opp_table) {
- /* OPPs are already managed */
- if (!_add_opp_dev(dev, opp_table))
- ret = -ENOMEM;
- goto put_opp_table;
- }
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
-
- /* We have opp-table node now, iterate over it and add OPPs */
- for_each_available_child_of_node(opp_np, np) {
- count++;
-
- ret = _opp_add_static_v2(opp_table, dev, np);
- if (ret) {
- dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
- ret);
- _dev_pm_opp_remove_table(opp_table, dev, false);
- goto put_opp_table;
- }
- }
-
- /* There should be one or more OPPs defined */
- if (WARN_ON(!count)) {
- ret = -ENOENT;
- goto put_opp_table;
- }
-
- opp_table->np = opp_np;
- if (of_property_read_bool(opp_np, "opp-shared"))
- opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
- else
- opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
-
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
-}
-
-/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
-{
- struct opp_table *opp_table;
- const struct property *prop;
- const __be32 *val;
- int nr, ret = 0;
-
- prop = of_find_property(dev->of_node, "operating-points", NULL);
- if (!prop)
- return -ENODEV;
- if (!prop->value)
- return -ENODATA;
-
- /*
- * Each OPP is a set of tuples consisting of frequency and
- * voltage like <freq-kHz vol-uV>.
- */
- nr = prop->length / sizeof(u32);
- if (nr % 2) {
- dev_err(dev, "%s: Invalid OPP table\n", __func__);
- return -EINVAL;
- }
-
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
-
- val = prop->value;
- while (nr) {
- unsigned long freq = be32_to_cpup(val++) * 1000;
- unsigned long volt = be32_to_cpup(val++);
-
- ret = _opp_add_v1(opp_table, dev, freq, volt, false);
- if (ret) {
- dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
- __func__, freq, ret);
- _dev_pm_opp_remove_table(opp_table, dev, false);
- break;
- }
- nr -= 2;
- }
-
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
-}
-
-/**
- * dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev: device pointer used to lookup OPP table.
- *
- * Register the initial OPP table with the OPP library for given device.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -ENODEV when 'operating-points' property is not found or is invalid data
- * in device node.
- * -ENODATA when empty 'operating-points' property is found
- * -EINVAL when invalid entries are found in opp-v2 table
- */
-int dev_pm_opp_of_add_table(struct device *dev)
-{
- struct device_node *opp_np;
- int ret;
-
- /*
- * OPPs have two versions of bindings now. The older one is deprecated,
- * so try the new binding first.
- */
- opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
- if (!opp_np) {
- /*
- * Try old-deprecated bindings for backward compatibility with
- * older dtbs.
- */
- return _of_add_opp_table_v1(dev);
- }
-
- ret = _of_add_opp_table_v2(dev, opp_np);
- of_node_put(opp_np);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
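A hedged sketch of the usual probe/remove pairing in a platform driver (driver name and callbacks hypothetical):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to add OPP table: %d\n", ret);
		return ret;
	}

	/* ... rest of probe ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	dev_pm_opp_of_remove_table(&pdev->dev);
	return 0;
}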
-
-/* CPU device specific helpers */
-
-/**
- * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
- * @cpumask: cpumask for which OPP table needs to be removed
- *
- * This removes the OPP tables for CPUs present in the @cpumask.
- * This should be used only to remove static entries created from DT.
- */
-void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
-{
- _dev_pm_opp_cpumask_remove_table(cpumask, true);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
-
-/**
- * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
- * @cpumask: cpumask for which OPP table needs to be added.
- *
- * This adds the OPP tables for CPUs present in the @cpumask.
- */
-int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
-{
- struct device *cpu_dev;
- int cpu, ret = 0;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- ret = dev_pm_opp_of_add_table(cpu_dev);
- if (ret) {
- /*
- * OPP may get registered dynamically, don't print error
- * message here.
- */
- pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
-
- /* Free all other OPPs */
- dev_pm_opp_of_cpumask_remove_table(cpumask);
- break;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
-
-/*
- * Works only for OPP v2 bindings.
- *
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
- */
-/**
- * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
- * @cpu_dev using operating-points-v2
- * bindings.
- *
- * @cpu_dev: CPU device for which we do this operation
- * @cpumask: cpumask to update with information of sharing CPUs
- *
- * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
- *
- * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
- */
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
- struct cpumask *cpumask)
-{
- struct device_node *np, *tmp_np, *cpu_np;
- int cpu, ret = 0;
-
- /* Get OPP descriptor node */
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
- if (!np) {
- dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
- return -ENOENT;
- }
-
- cpumask_set_cpu(cpu_dev->id, cpumask);
-
- /* OPPs are shared ? */
- if (!of_property_read_bool(np, "opp-shared"))
- goto put_cpu_node;
-
- for_each_possible_cpu(cpu) {
- if (cpu == cpu_dev->id)
- continue;
-
- cpu_np = of_get_cpu_node(cpu, NULL);
- if (!cpu_np) {
- dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
- __func__, cpu);
- ret = -ENOENT;
- goto put_cpu_node;
- }
-
- /* Get OPP descriptor node */
- tmp_np = _opp_of_get_opp_desc_node(cpu_np);
- if (!tmp_np) {
- pr_err("%pOF: Couldn't find opp node\n", cpu_np);
- ret = -ENOENT;
- goto put_cpu_node;
- }
-
- /* CPUs are sharing opp node */
- if (np == tmp_np)
- cpumask_set_cpu(cpu, cpumask);
-
- of_node_put(tmp_np);
- }
-
-put_cpu_node:
- of_node_put(np);
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
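A rough sketch of how a CPU scaling driver might combine this with the cpumask helpers above (error handling abbreviated; cpu_dev and policy assumed to exist in the caller):

	/* Which CPUs share cpu_dev's operating-points-v2 table? */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		/* No shared v2 table found; fall back to a per-CPU table */
		cpumask_set_cpu(policy->cpu, policy->cpus);
	}

	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);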
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
deleted file mode 100644
index 166eef990599..000000000000
--- a/drivers/base/power/opp/opp.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- * Nishanth Menon
- * Romit Dasgupta
- * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __DRIVER_OPP_H__
-#define __DRIVER_OPP_H__
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/limits.h>
-#include <linux/pm_opp.h>
-#include <linux/notifier.h>
-
-struct clk;
-struct regulator;
-
-/* Lock to allow exclusive modification to the device and opp lists */
-extern struct mutex opp_table_lock;
-
-extern struct list_head opp_tables;
-
-/*
- * Internal data structure organization with the OPP layer library is as
- * follows:
- * opp_tables (root)
- * |- device 1 (represents voltage domain 1)
- * | |- opp 1 (availability, freq, voltage)
- * | |- opp 2 ..
- * ... ...
- * | `- opp n ..
- * |- device 2 (represents the next voltage domain)
- * ...
- * `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by opp_table structure while each opp
- * is represented by the opp structure.
- */
-
-/**
- * struct dev_pm_opp - Generic OPP description structure
- * @node: opp table node. The nodes are maintained throughout the lifetime
- * of boot. It is expected only an optimal set of OPPs are
- * added to the library by the SoC framework.
- * IMPORTANT: the opp nodes should be maintained in increasing
- * order.
- * @kref: for reference count of the OPP.
- * @available: true/false - marks whether this OPP is available or not
- * @dynamic: true if the OPP was not created from static DT entries.
- * @turbo: true if turbo (boost) OPP
- * @suspend: true if suspend OPP
- * @rate: Frequency in hertz
- * @supplies: Power supplies voltage/current values
- * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
- * frequency from any other OPP's frequency.
- * @opp_table: points back to the opp_table struct this opp belongs to
- * @np: OPP's device node.
- * @dentry: debugfs dentry pointer (per opp)
- *
- * This structure stores the OPP information for a given device.
- */
-struct dev_pm_opp {
- struct list_head node;
- struct kref kref;
-
- bool available;
- bool dynamic;
- bool turbo;
- bool suspend;
- unsigned long rate;
-
- struct dev_pm_opp_supply *supplies;
-
- unsigned long clock_latency_ns;
-
- struct opp_table *opp_table;
-
- struct device_node *np;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *dentry;
-#endif
-};
-
-/**
- * struct opp_device - devices managed by 'struct opp_table'
- * @node: list node
- * @dev: device to which the struct object belongs
- * @dentry: debugfs dentry pointer (per device)
- *
- * This is an internal data structure maintaining the devices that are managed
- * by 'struct opp_table'.
- */
-struct opp_device {
- struct list_head node;
- const struct device *dev;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *dentry;
-#endif
-};
-
-enum opp_table_access {
- OPP_TABLE_ACCESS_UNKNOWN = 0,
- OPP_TABLE_ACCESS_EXCLUSIVE = 1,
- OPP_TABLE_ACCESS_SHARED = 2,
-};
-
-/**
- * struct opp_table - Device opp structure
- * @node: table node - contains the devices with OPPs that
- * have been registered. Nodes once added are not modified in this
- * table.
- * @head: notifier head to notify the OPP availability changes.
- * @dev_list: list of devices that share these OPPs
- * @opp_list: table of opps
- * @kref: for reference count of the table.
- * @lock: mutex protecting the opp_list.
- * @np: struct device_node pointer for opp's DT node.
- * @clock_latency_ns_max: Max clock latency in nanoseconds.
- * @shared_opp: OPP is shared between multiple devices.
- * @suspend_opp: Pointer to OPP to be used during device suspend.
- * @supported_hw: Array of version number to support.
- * @supported_hw_count: Number of elements in supported_hw array.
- * @prop_name: A name to postfix to many DT properties, while parsing them.
- * @clk: Device's clock handle
- * @regulators: Supply regulators
- * @regulator_count: Number of power supply regulators
- * @set_opp: Platform specific set_opp callback
- * @set_opp_data: Data to be passed to set_opp callback
- * @dentry: debugfs dentry pointer of the real device directory (not links).
- * @dentry_name: Name of the real dentry.
- *
- * @voltage_tolerance_v1: In percentage, for v1 bindings only.
- *
- * This is an internal data structure maintaining the link to opps attached to
- * a device. This structure is not meant to be shared with users as it is
- * meant for book-keeping and is private to the OPP library.
- */
-struct opp_table {
- struct list_head node;
-
- struct blocking_notifier_head head;
- struct list_head dev_list;
- struct list_head opp_list;
- struct kref kref;
- struct mutex lock;
-
- struct device_node *np;
- unsigned long clock_latency_ns_max;
-
- /* For backward compatibility with v1 bindings */
- unsigned int voltage_tolerance_v1;
-
- enum opp_table_access shared_opp;
- struct dev_pm_opp *suspend_opp;
-
- unsigned int *supported_hw;
- unsigned int supported_hw_count;
- const char *prop_name;
- struct clk *clk;
- struct regulator **regulators;
- unsigned int regulator_count;
-
- int (*set_opp)(struct dev_pm_set_opp_data *data);
- struct dev_pm_set_opp_data *set_opp_data;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *dentry;
- char dentry_name[NAME_MAX];
-#endif
-};
-
-/* Routines internal to opp core */
-void _get_opp_table_kref(struct opp_table *opp_table);
-struct opp_table *_find_opp_table(struct device *dev);
-struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all);
-void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all);
-struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
-void _opp_free(struct dev_pm_opp *opp);
-int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
-int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
-void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
-struct opp_table *_add_opp_table(struct device *dev);
-
-#ifdef CONFIG_OF
-void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
-#else
-static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
-#endif
-
-#ifdef CONFIG_DEBUG_FS
-void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
-void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
-#else
-static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
-
-static inline int opp_debug_create_one(struct dev_pm_opp *opp,
- struct opp_table *opp_table)
-{ return 0; }
-static inline int opp_debug_register(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{ return 0; }
-
-static inline void opp_debug_unregister(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{ }
-#endif /* DEBUG_FS */
-
-#endif /* __DRIVER_OPP_H__ */
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 277d43a83f53..3382542b39b7 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -139,6 +139,9 @@ static int apply_constraint(struct dev_pm_qos_request *req,
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
+ if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
+ value = 0;
+
ret = pm_qos_update_target(&qos->resume_latency,
&req->data.pnode, action, value);
break;
@@ -189,7 +192,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
plist_head_init(&c->list);
c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
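For context on the qos.c hunks above: after this change an internal resume-latency value of 0 no longer doubles as "default/no constraint"; it now means "no latency is acceptable", so runtime suspend must be refused, while the separate PM_QOS_RESUME_LATENCY_NO_CONSTRAINT sentinel (S32_MAX, as far as I can tell from the pm_qos headers) expresses "no requirement". The following is a minimal stand-alone sketch of that decision under those assumed semantics; suspend_allowed() is a made-up helper mirroring the __dev_pm_qos_read_value() == 0 check in the runtime.c hunk further down, and the constant is copied locally rather than taken from kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for PM_QOS_RESUME_LATENCY_NO_CONSTRAINT (assumed S32_MAX). */
#define RESUME_LATENCY_NO_CONSTRAINT INT32_MAX

/*
 * Hypothetical helper mirroring rpm_check_suspend_allowed(): only an
 * effective constraint of 0 ("no latency tolerated") forbids suspend;
 * any other value, including "no constraint", permits it.
 */
static int suspend_allowed(int32_t resume_latency_us)
{
        return resume_latency_us != 0;
}

int main(void)
{
        printf("no constraint: %d\n", suspend_allowed(RESUME_LATENCY_NO_CONSTRAINT));
        printf("0 us:          %d\n", suspend_allowed(0));
        printf("100 us:        %d\n", suspend_allowed(100));
        return 0;
}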
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7bcf80fa9ada..2362b9e9701e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
if (!dev->power.use_autosuspend)
goto out;
- autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
goto out;
- last_busy = ACCESS_ONCE(dev->power.last_busy);
+ last_busy = READ_ONCE(dev->power.last_busy);
elapsed = jiffies - last_busy;
if (elapsed < 0)
goto out; /* jiffies has wrapped around. */
@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
- else if (__dev_pm_qos_read_value(dev) < 0)
+ else if (__dev_pm_qos_read_value(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
@@ -894,9 +894,9 @@ static void pm_runtime_work(struct work_struct *work)
*
* Check if the time is right and queue a suspend request.
*/
-static void pm_suspend_timer_fn(unsigned long data)
+static void pm_suspend_timer_fn(struct timer_list *t)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = from_timer(dev, t, power.suspend_timer);
unsigned long flags;
unsigned long expires;
@@ -1499,8 +1499,7 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
- (unsigned long)dev);
+ timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
init_waitqueue_head(&dev->power.wait_queue);
}
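The runtime.c hunks above (and the wakeup.c ones below) follow the same conversion pattern: the timer callback now receives the struct timer_list pointer and recovers its enclosing object with from_timer(), a container_of() wrapper from include/linux/timer.h, instead of casting an opaque unsigned long back to a pointer. Here is a stand-alone sketch of that recovery step; my_device and suspend_timer_fn are made-up names, and the container_of() arithmetic is written out by hand so the example compiles outside the kernel.

#include <stddef.h>
#include <stdio.h>

struct timer_list { int pending; };     /* stand-in for the kernel type */

struct my_device {
        const char *name;
        struct timer_list suspend_timer;        /* timer embedded in the object */
};

/* New-style callback: the timer pointer itself identifies the device. */
static void suspend_timer_fn(struct timer_list *t)
{
        /* Equivalent of: struct my_device *dev = from_timer(dev, t, suspend_timer); */
        struct my_device *dev = (struct my_device *)
                ((char *)t - offsetof(struct my_device, suspend_timer));

        printf("suspend timer fired for %s\n", dev->name);
}

int main(void)
{
        struct my_device dev = { .name = "dev0" };

        /* In the kernel this would be: timer_setup(&dev.suspend_timer, suspend_timer_fn, 0); */
        suspend_timer_fn(&dev.suspend_timer);   /* simulate expiry */
        return 0;
}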
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 156ab57bca77..e153e28b1857 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
+ s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+ if (value == 0)
+ return sprintf(buf, "n/a\n");
+ else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ value = 0;
+
+ return sprintf(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_store(struct device *dev,
@@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
s32 value;
int ret;
- if (kstrtos32(buf, 0, &value))
- return -EINVAL;
+ if (!kstrtos32(buf, 0, &value)) {
+ /*
+ * Prevent users from writing negative or "no constraint" values
+ * directly.
+ */
+ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ return -EINVAL;
- if (value < 0)
+ if (value == 0)
+ value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ value = 0;
+ } else {
return -EINVAL;
+ }
ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
value);
@@ -309,33 +326,6 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
static DEVICE_ATTR(pm_qos_no_power_off, 0644,
pm_qos_no_power_off_show, pm_qos_no_power_off_store);
-static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
- & PM_QOS_FLAG_REMOTE_WAKEUP));
-}
-
-static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- int ret;
-
- if (kstrtoint(buf, 0, &ret))
- return -EINVAL;
-
- if (ret != 0 && ret != 1)
- return -EINVAL;
-
- ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
- return ret < 0 ? ret : n;
-}
-
-static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
- pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
-
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
@@ -671,7 +661,6 @@ static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
static struct attribute *pm_qos_flags_attrs[] = {
&dev_attr_pm_qos_no_power_off.attr,
- &dev_attr_pm_qos_remote_wakeup.attr,
NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
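User space keeps its historical encoding in the sysfs hunks above: reading or writing "0" still means "no constraint" (stored internally as PM_QOS_RESUME_LATENCY_NO_CONSTRAINT), while the new string "n/a" maps to the internal value 0 that forbids runtime suspend. Below is a small illustrative model of the read-back mapping only, assuming (it is not shown in this excerpt) that the attribute is the per-device power/pm_qos_resume_latency_us file; names and constants are local to the sketch.

#include <stdint.h>
#include <stdio.h>

#define NO_CONSTRAINT INT32_MAX /* stands in for PM_QOS_RESUME_LATENCY_NO_CONSTRAINT */

/* What a read of the attribute would report for a given internal value. */
static void show_value(int32_t internal)
{
        if (internal == 0)
                printf("n/a\n");                /* runtime suspend not allowed */
        else if (internal == NO_CONSTRAINT)
                printf("0\n");                  /* "no constraint", as before */
        else
                printf("%d\n", (int)internal);
}

int main(void)
{
        show_value(0);
        show_value(NO_CONSTRAINT);
        show_value(100);
        return 0;
}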
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cdd6f256da59..680ee1d36ac9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -54,7 +54,7 @@ static unsigned int saved_count;
static DEFINE_SPINLOCK(events_lock);
-static void pm_wakeup_timer_fn(unsigned long data);
+static void pm_wakeup_timer_fn(struct timer_list *t);
static LIST_HEAD(wakeup_sources);
@@ -176,7 +176,7 @@ void wakeup_source_add(struct wakeup_source *ws)
return;
spin_lock_init(&ws->lock);
- setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+ timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
ws->active = false;
ws->last_time = ktime_get();
@@ -481,8 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* Use timer struct to check if the given source is initialized
* by wakeup_source_add.
*/
- return ws->timer.function != pm_wakeup_timer_fn ||
- ws->timer.data != (unsigned long)ws;
+ return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
}
/*
@@ -724,9 +723,9 @@ EXPORT_SYMBOL_GPL(pm_relax);
* in @data if it is currently active and its timer has not been canceled and
* the expiration time of the timer is not in future.
*/
-static void pm_wakeup_timer_fn(unsigned long data)
+static void pm_wakeup_timer_fn(struct timer_list *t)
{
- struct wakeup_source *ws = (struct wakeup_source *)data;
+ struct wakeup_source *ws = from_timer(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);