Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                  |   3
-rw-r--r--  drivers/base/attribute_container.c    |   1
-rw-r--r--  drivers/base/bus.c                    |   2
-rw-r--r--  drivers/base/component.c              |   8
-rw-r--r--  drivers/base/core.c                   |  20
-rw-r--r--  drivers/base/cpu.c                    |  46
-rw-r--r--  drivers/base/devres.c                 |  26
-rw-r--r--  drivers/base/dma-buf.c                |  43
-rw-r--r--  drivers/base/firmware_class.c         |  14
-rw-r--r--  drivers/base/node.c                   |   4
-rw-r--r--  drivers/base/platform.c               |  11
-rw-r--r--  drivers/base/power/Makefile           |   3
-rw-r--r--  drivers/base/power/clock_ops.c        |   1
-rw-r--r--  drivers/base/power/common.c           |   1
-rw-r--r--  drivers/base/power/domain.c           |   3
-rw-r--r--  drivers/base/power/domain_governor.c  |   1
-rw-r--r--  drivers/base/power/generic_ops.c      |   2
-rw-r--r--  drivers/base/power/main.c             | 280
-rw-r--r--  drivers/base/power/opp.c              |   1
-rw-r--r--  drivers/base/power/power.h            |   4
-rw-r--r--  drivers/base/power/qos.c              | 220
-rw-r--r--  drivers/base/power/runtime.c          | 164
-rw-r--r--  drivers/base/power/sysfs.c            |  97
-rw-r--r--  drivers/base/regmap/internal.h        |   2
-rw-r--r--  drivers/base/regmap/regcache.c        |  13
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  |   2
-rw-r--r--  drivers/base/regmap/regmap-i2c.c      |   1
-rw-r--r--  drivers/base/regmap/regmap-irq.c      |   6
-rw-r--r--  drivers/base/regmap/regmap-mmio.c     |  57
-rw-r--r--  drivers/base/regmap/regmap-spi.c      |   1
-rw-r--r--  drivers/base/regmap/regmap-spmi.c     | 228
-rw-r--r--  drivers/base/regmap/regmap.c          | 353
-rw-r--r--  drivers/base/topology.c               |   1
33 files changed, 1288 insertions(+), 331 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ec36e7772e57..8fa8deab6449 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -185,6 +185,9 @@ config GENERIC_CPU_DEVICES
bool
default n
+config GENERIC_CPU_AUTOPROBE
+ bool
+
config SOC_BUS
bool
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index ecc1929d7f6a..b84ca8f13f9e 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -12,7 +12,6 @@
*/
#include <linux/attribute_container.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 59dc8086e4fa..83e910a57563 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1218,7 +1218,7 @@ err_dev:
* with the name of the subsystem. The root device can carry subsystem-
* wide attributes. All registered devices are below this single root
* device and are named after the subsystem with a simple enumeration
- * number appended. The registered devices are not explicitely named;
+ * number appended. The registered devices are not explicitly named;
* only 'id' in the device needs to be set.
*
* Do not use this interface for anything new, it exists for compatibility
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
goto out;
}
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Found all components */
ret = master->ops->bind(master->dev);
if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
master_remove_components(master);
goto out;
}
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
{
if (master->bound) {
master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
master->bound = false;
}
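With the devres group now opened around the master's ->bind() and released again in take_down_master() or on bind failure, a master's bind callback can rely on devm_* allocations being undone automatically. A minimal sketch of such a callback (foo_drm and foo_master_bind are hypothetical names):

static int foo_master_bind(struct device *dev)
{
        struct foo_drm *priv;

        /* released when the component core closes the devres group
         * on unbind or on bind failure */
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        dev_set_drvdata(dev, priv);
        return component_bind_all(dev, priv);
}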
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2b567177ef78..0dd65281cc65 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -23,7 +23,6 @@
#include <linux/genhd.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
@@ -571,6 +570,23 @@ void device_remove_file(struct device *dev,
EXPORT_SYMBOL_GPL(device_remove_file);
/**
+ * device_remove_file_self - remove sysfs attribute file from its own method.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ *
+ * See kernfs_remove_self() for details.
+ */
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr)
+{
+ if (dev)
+ return sysfs_remove_file_self(&dev->kobj, &attr->attr);
+ else
+ return false;
+}
+EXPORT_SYMBOL_GPL(device_remove_file_self);
+
+/**
* device_create_bin_file - create sysfs binary attribute file for device.
* @dev: device.
* @attr: device binary attribute descriptor.
@@ -2003,7 +2019,6 @@ void device_shutdown(void)
spin_lock(&devices_kset->list_lock);
}
spin_unlock(&devices_kset->list_lock);
- async_synchronize_full();
}
/*
@@ -2058,7 +2073,6 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
return pos;
}
-EXPORT_SYMBOL(create_syslog_header);
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
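device_remove_file_self() lets a sysfs attribute delete itself from its own store method without deadlocking, per kernfs_remove_self(). A minimal sketch of the intended pattern, with foo_delete_device() as a hypothetical teardown helper:

static ssize_t delete_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        /* returns true for exactly one caller, which then owns teardown */
        if (device_remove_file_self(dev, attr))
                foo_delete_device(dev);         /* hypothetical */
        return count;
}
static DEVICE_ATTR_WO(delete);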
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f48370dfc908..006b1bc5297d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -15,6 +15,7 @@
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/cpufeature.h>
#include "base.h"
@@ -286,6 +287,41 @@ static void cpu_device_release(struct device *dev)
*/
}
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t n;
+ u32 i;
+
+ n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+ CPU_FEATURE_TYPEVAL);
+
+ for (i = 0; i < MAX_CPU_FEATURES; i++)
+ if (cpu_have_feature(i)) {
+ if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+ WARN(1, "CPU features overflow page\n");
+ break;
+ }
+ n += sprintf(&buf[n], ",%04X", i);
+ }
+ buf[n++] = '\n';
+ return n;
+}
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buf) {
+ print_cpu_modalias(NULL, NULL, buf);
+ add_uevent_var(env, "MODALIAS=%s", buf);
+ kfree(buf);
+ }
+ return 0;
+}
+#endif
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -306,8 +342,8 @@ int register_cpu(struct cpu *cpu, int num)
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
cpu->dev.of_node = of_get_cpu_node(num, NULL);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
- cpu->dev.bus->uevent = arch_cpu_uevent;
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = cpu_uevent;
#endif
cpu->dev.groups = common_cpu_attr_groups;
if (cpu->hotpluggable)
@@ -330,8 +366,8 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
static struct attribute *cpu_root_attrs[] = {
@@ -344,7 +380,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
NULL
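The generic modalias emitted above has the form cpu:type:<arch type>:feature:,XXXX,... so modules can autoload on a CPU feature bit. A minimal consumer sketch, assuming the <linux/cpufeature.h> helper from the same series and a hypothetical feature token FOO:

#include <linux/cpufeature.h>
#include <linux/module.h>

static int __init foo_mod_init(void)
{
        return 0;       /* set up hardware that needs the FOO feature */
}

/* registers a cpu:...:feature:,XXXX device table entry matching the
 * uevent generated by cpu_uevent() above, and wires up module_init() */
module_cpu_feature_match(FOO, foo_mod_init);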
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 545c4de412c3..db4e264eecb6 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -791,6 +791,32 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kmalloc);
/**
+ * devm_kstrdup - Allocate resource managed space and
+ * copy an existing string into it.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = devm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
+
+/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
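A minimal usage sketch for devm_kstrdup(): the copy lives exactly as long as the device binding, so the usual kfree() on the error and remove paths disappears (names here are hypothetical):

static int foo_probe(struct platform_device *pdev)
{
        const char *label = "foo";
        char *name;

        of_property_read_string(pdev->dev.of_node, "label", &label);
        name = devm_kstrdup(&pdev->dev, label, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        /* use name; no explicit kfree() needed on unbind */
        return 0;
}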
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd61da2..ea77701deda4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -251,9 +251,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
*
- * Returns struct dma_buf_attachment * for this attachment; may return negative
- * error codes.
- *
+ * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
+ * error.
*/
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
@@ -319,9 +318,8 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* @attach: [in] attachment whose scatterlist is to be returned
* @direction: [in] direction of DMA transfer
*
- * Returns sg_table containing the scatterlist to be returned; may return NULL
- * or ERR_PTR.
- *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
@@ -334,6 +332,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return ERR_PTR(-EINVAL);
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (!sg_table)
+ sg_table = ERR_PTR(-ENOMEM);
return sg_table;
}
@@ -544,6 +544,8 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
* Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * Returns NULL on error.
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
@@ -566,7 +568,9 @@ void *dma_buf_vmap(struct dma_buf *dmabuf)
BUG_ON(dmabuf->vmap_ptr);
ptr = dmabuf->ops->vmap(dmabuf);
- if (IS_ERR_OR_NULL(ptr))
+ if (WARN_ON_ONCE(IS_ERR(ptr)))
+ ptr = NULL;
+ if (!ptr)
goto out_unlock;
dmabuf->vmap_ptr = ptr;
@@ -616,36 +620,35 @@ static int dma_buf_describe(struct seq_file *s)
if (ret)
return ret;
- seq_printf(s, "\nDma-buf Objects:\n");
- seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+ seq_puts(s, "\nDma-buf Objects:\n");
+ seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
- seq_printf(s,
- "\tERROR locking buffer object: skipping\n");
+ seq_puts(s,
+ "\tERROR locking buffer object: skipping\n");
continue;
}
- seq_printf(s, "\t");
-
- seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
- buf_obj->exp_name, buf_obj->size,
+ seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+ buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
- (long)(buf_obj->file->f_count.counter));
+ (long)(buf_obj->file->f_count.counter),
+ buf_obj->exp_name);
- seq_printf(s, "\t\tAttached Devices:\n");
+ seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
- seq_printf(s, "\t\t");
+ seq_puts(s, "\t");
- seq_printf(s, "%s\n", attach_obj->dev->init_name);
+ seq_printf(s, "%s\n", dev_name(attach_obj->dev));
attach_count++;
}
- seq_printf(s, "\n\t\tTotal %d devices attached\n",
+ seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
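Since map_dma_buf failures are now normalized to ERR_PTR() (a NULL sg_table from the exporter becomes -ENOMEM), importers only need the IS_ERR() check. A minimal sketch:

static int foo_import(struct dma_buf_attachment *attach)
{
        struct sg_table *sgt;

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))        /* no separate NULL check needed anymore */
                return PTR_ERR(sgt);

        /* ... use sgt ... */
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        return 0;
}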
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8a97ddfa6122..d276e33880be 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -649,7 +649,9 @@ static ssize_t firmware_loading_store(struct device *dev,
* see the mapped 'buf->data' once the loading
* is completed.
* */
- fw_map_pages_buf(fw_buf);
+ if (fw_map_pages_buf(fw_buf))
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
list_del_init(&fw_buf->pending_list);
complete_all(&fw_buf->completion);
break;
@@ -900,7 +902,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
- schedule_delayed_work(&fw_priv->timeout_work, timeout);
+ queue_delayed_work(system_power_efficient_wq,
+ &fw_priv->timeout_work, timeout);
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
@@ -908,6 +911,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
wait_for_completion(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
+ if (!buf->data)
+ retval = -ENOMEM;
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
@@ -1570,8 +1575,8 @@ static void device_uncache_fw_images_work(struct work_struct *work)
*/
static void device_uncache_fw_images_delay(unsigned long delay)
{
- schedule_delayed_work(&fw_cache.work,
- msecs_to_jiffies(delay));
+ queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
+ msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
@@ -1580,6 +1585,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
kill_requests_without_uevent();
device_cache_fw_images();
break;
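The schedule_delayed_work() to queue_delayed_work(system_power_efficient_wq, ...) conversion is the generic pattern for deferred work that need not run on the local CPU; with workqueue power efficiency enabled (workqueue.power_efficient), such work migrates to an unbound queue instead of waking an idle CPU. The same one-line change applies in any driver; a sketch with a hypothetical priv->poll_work:

        /* before: schedule_delayed_work(&priv->poll_work, delay); */
        queue_delayed_work(system_power_efficient_wq, &priv->poll_work,
                           msecs_to_jiffies(500));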
diff --git a/drivers/base/node.c b/drivers/base/node.c
index bc9f43bf7e29..8f7ed9933a7c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -599,7 +599,11 @@ int register_one_node(int nid)
void unregister_one_node(int nid)
{
+ if (!node_devices[nid])
+ return;
+
unregister_node(node_devices[nid]);
+ kfree(node_devices[nid]);
node_devices[nid] = NULL;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index bc78848dd59a..e714709704e4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -481,11 +481,10 @@ static int platform_drv_probe(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
int ret;
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_attach(_dev, true);
+ acpi_dev_pm_attach(_dev, true);
ret = drv->probe(dev);
- if (ret && ACPI_HANDLE(_dev))
+ if (ret)
acpi_dev_pm_detach(_dev, true);
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -508,8 +507,7 @@ static int platform_drv_remove(struct device *_dev)
int ret;
ret = drv->remove(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
return ret;
}
@@ -520,8 +518,7 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
}
/**
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2e58ebb1f6c0..1cb8544598d5 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,5 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e870bbe9ec4e..b99e6c06ee67 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/io.h>
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 5da914041305..df2e5eeaeb05 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index bfb8955c406c..6f54962aae1d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
@@ -42,7 +41,7 @@
struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
if (!__retval && __elapsed > __td->field) { \
__td->field = __elapsed; \
- dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \
__elapsed); \
genpd->max_off_time_changed = true; \
__td->constraint_changed = true; \
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 28dee3053f1f..a089e3bcdfbc 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index a2e55bfdf572..96a92db83cad 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -285,7 +285,7 @@ int pm_generic_restore(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_restore);
/**
- * pm_generic_complete - Generic routine competing a device power transition.
+ * pm_generic_complete - Generic routine completing a device power transition.
* @dev: Device to handle.
*
* Complete a device power transition during a system-wide power transition.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1b41fca3d65a..86d5e4fb5b98 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,7 @@
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
+#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
+ dev->power.is_noirq_suspended = false;
+ dev->power.is_late_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_noirq_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_noirq_suspended = false;
Out:
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_noirq(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
*/
static void dpm_resume_noirq(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_noirq_list)) {
- struct device *dev = to_device(dpm_noirq_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_noirq, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_noirq(dev, state);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
+ if (!is_async(dev)) {
+ int error;
+
+ error = device_resume_noirq(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
cpuidle_resume();
@@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_late_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_late_suspended = false;
Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_early(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
static void dpm_resume_early(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_early, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_early(dev, state);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
+ if (!is_async(dev)) {
+ int error;
+ error = device_resume_early(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " early", error);
+ }
+ }
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "early");
}
@@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie)
put_device(dev);
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
+
+ cpufreq_resume();
}
/**
@@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
+
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
callback = pm_noirq_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_noirq_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_noirq, dev);
+ return 0;
+ }
+ return __device_suspend_noirq(dev, pm_transition, false);
}
/**
@@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state)
cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state);
+ error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
- suspend_stats.failed_suspend_noirq++;
- dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+
+ if (error) {
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_resume_noirq(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "noirq");
+ }
return error;
}
@@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
__pm_runtime_disable(dev, false);
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "late power domain ";
@@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
callback = pm_late_early_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_late_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+ put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_late, dev);
+ return 0;
+ }
+
+ return __device_suspend_late(dev, pm_transition, false);
}
/**
@@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state)
int error = 0;
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state);
+ error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
- suspend_stats.failed_suspend_late++;
- dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "late");
-
+ }
return error;
}
@@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state)
might_sleep();
+ cpufreq_suspend();
+
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
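The async noirq and late/early phases introduced above only apply to devices that have opted in, via the existing device_enable_async_suspend() call, typically at probe time. A minimal sketch:

static int foo_probe(struct platform_device *pdev)
{
        /* let the PM core run this device's suspend/resume callbacks,
         * now including the noirq and late/early phases, in parallel
         * with other async-capable devices */
        device_enable_async_suspend(&pdev->dev);
        return 0;
}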
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index fa4187418440..25538675d59e 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index cfc3226ec492..a21223d95926 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add_latency(struct device *dev);
-extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5c1361a9e5dd..36b9eb4862cb 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
s32 __dev_pm_qos_read_value(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->latency);
+ 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}
/**
@@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
int ret;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
- ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
- action, value);
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = pm_qos_update_target(&qos->resume_latency,
+ &req->data.pnode, action, value);
if (ret) {
- value = pm_qos_read_value(&qos->latency);
+ value = pm_qos_read_value(&qos->resume_latency);
blocking_notifier_call_chain(&dev_pm_notifiers,
(unsigned long)value,
req);
}
break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ ret = pm_qos_update_target(&qos->latency_tolerance,
+ &req->data.pnode, action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency_tolerance);
+ req->dev->power.set_latency_tolerance(req->dev, value);
+ }
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_head_init(&c->list);
- c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
- c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ c = &qos->latency_tolerance;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
@@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
goto out;
/* Flush the constraints lists for the device. */
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
@@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+ c = &qos->latency_tolerance;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
+static bool dev_pm_qos_invalid_request(struct device *dev,
+ struct dev_pm_qos_request *req)
+{
+ return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+ && !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret = 0;
+
+ if (!dev || dev_pm_qos_invalid_request(dev, req))
+ return -EINVAL;
+
+ if (WARN(dev_pm_qos_request_active(req),
+ "%s() called for already added request\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+ if (!ret) {
+ req->dev = dev;
+ req->type = type;
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ }
+ return ret;
+}
+
/**
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
@@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
- int ret = 0;
-
- if (!dev || !req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(dev_pm_qos_request_active(req),
- "%s() called for already added request\n", __func__))
- return -EINVAL;
+ int ret;
mutex_lock(&dev_pm_qos_mtx);
-
- if (IS_ERR(dev->power.qos))
- ret = -ENODEV;
- else if (!dev->power.qos)
- ret = dev_pm_qos_constraints_allocate(dev);
-
- trace_dev_pm_qos_add_request(dev_name(dev), type, value);
- if (!ret) {
- req->dev = dev;
- req->type = type;
- ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
- }
-
+ ret = __dev_pm_qos_add_request(dev, req, type, value);
mutex_unlock(&dev_pm_qos_mtx);
-
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
@@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -ENODEV;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
+ case DEV_PM_QOS_RESUME_LATENCY:
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
@@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
- ret = blocking_notifier_chain_register(
- dev->power.qos->latency.notifiers, notifier);
+ ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
/* Silently return if the constraints object is not present. */
if (!IS_ERR_OR_NULL(dev->power.qos))
- retval = blocking_notifier_chain_unregister(
- dev->power.qos->latency.notifiers,
- notifier);
+ retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return retval;
@@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
* @dev: Device whose ancestor to add the request for.
* @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
* @value: Constraint latency value.
*/
int dev_pm_qos_add_ancestor_request(struct device *dev,
- struct dev_pm_qos_request *req, s32 value)
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
{
struct device *ancestor = dev->parent;
int ret = -ENODEV;
- while (ancestor && !ancestor->power.ignore_children)
- ancestor = ancestor->parent;
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ while (ancestor && !ancestor->power.set_latency_tolerance)
+ ancestor = ancestor->parent;
+
+ break;
+ default:
+ ancestor = NULL;
+ }
if (ancestor)
- ret = dev_pm_qos_add_request(ancestor, req,
- DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(ancestor, req, type, value);
if (ret < 0)
req->dev = NULL;
@@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
struct dev_pm_qos_request *req = NULL;
switch(type) {
- case DEV_PM_QOS_LATENCY:
- req = dev->power.qos->latency_req;
- dev->power.qos->latency_req = NULL;
+ case DEV_PM_QOS_RESUME_LATENCY:
+ req = dev->power.qos->resume_latency_req;
+ dev->power.qos->resume_latency_req = NULL;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ req = dev->power.qos->latency_tolerance_req;
+ dev->power.qos->latency_tolerance_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
req = dev->power.qos->flags_req;
@@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!req)
return -ENOMEM;
- ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
if (ret < 0) {
kfree(req);
return ret;
@@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
- else if (dev->power.qos->latency_req)
+ else if (dev->power.qos->resume_latency_req)
ret = -EEXIST;
if (ret < 0) {
@@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
- dev->power.qos->latency_req = req;
+ dev->power.qos->resume_latency_req = req;
mutex_unlock(&dev_pm_qos_mtx);
- ret = pm_qos_sysfs_add_latency(dev);
+ ret = pm_qos_sysfs_add_resume_latency(dev);
if (ret)
- dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}
/**
@@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
@@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
pm_runtime_put(dev);
return ret;
}
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+ s32 ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req ?
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+ dev->power.qos->latency_tolerance_req->data.pnode.prio;
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req) {
+ struct dev_pm_qos_request *req;
+
+ if (val < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+ if (ret < 0) {
+ kfree(req);
+ goto out;
+ }
+ dev->power.qos->latency_tolerance_req = req;
+ } else {
+ if (val < 0) {
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+ ret = 0;
+ } else {
+ ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+ }
+ }
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
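The new DEV_PM_QOS_LATENCY_TOLERANCE request type is only valid on devices that supply a power.set_latency_tolerance() hook; user space then drives it through the pm_qos_latency_tolerance_us attribute added in sysfs.c below. A hypothetical hook sketch:

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
        if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
                foo_use_default_lpm_policy(dev);        /* hypothetical */
        else
                foo_program_max_latency_us(dev, val);   /* hypothetical */
}

        /* during device setup, before the device is visible: */
        dev->power.set_latency_tolerance = foo_set_latency_tolerance;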
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 72e00e66ecc5..67c7938e430b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,6 +13,43 @@
#include <trace/events/rpm.h>
#include "power.h"
+#define RPM_GET_CALLBACK(dev, cb) \
+({ \
+ int (*__rpm_cb)(struct device *__d); \
+ \
+ if (dev->pm_domain) \
+ __rpm_cb = dev->pm_domain->ops.cb; \
+ else if (dev->type && dev->type->pm) \
+ __rpm_cb = dev->type->pm->cb; \
+ else if (dev->class && dev->class->pm) \
+ __rpm_cb = dev->class->pm->cb; \
+ else if (dev->bus && dev->bus->pm) \
+ __rpm_cb = dev->bus->pm->cb; \
+ else \
+ __rpm_cb = NULL; \
+ \
+ if (!__rpm_cb && dev->driver && dev->driver->pm) \
+ __rpm_cb = dev->driver->pm->cb; \
+ \
+ __rpm_cb; \
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
@@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_idle;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_idle;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_idle;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_idle;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_idle;
+ callback = rpm_get_idle_cb(dev);
if (callback)
retval = __rpm_callback(callback, dev);
@@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_suspend;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_suspend;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_suspend;
+ callback = rpm_get_suspend_cb(dev);
retval = rpm_callback(callback, dev);
if (retval)
@@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_resume;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_resume;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_resume;
+ callback = rpm_get_resume_cb(dev);
retval = rpm_callback(callback, dev);
if (retval) {
@@ -1130,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
* cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
* suspended after its runtime PM has been disabled.
@@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put(dev->parent);
}
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status and,
+ * if it is active, invoke its .runtime_suspend callback to bring it into
+ * suspend state. Keep runtime PM disabled to preserve the state unless we
+ * encounter errors.
+ *
+ * Typically this function may be invoked from a system suspend callback to make
+ * sure the device is put into low power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ pm_runtime_disable(dev);
+
+ /*
+ * Note that pm_runtime_status_suspended() returns false while
+ * !CONFIG_PM_RUNTIME, which means the device will be put into low
+ * power state.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = rpm_get_suspend_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto err;
+
+ pm_runtime_set_suspended(dev);
+ return 0;
+err:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the user to have brought the device
+ * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
+ * those actions and bring the device into full power. We update the runtime PM
+ * status and re-enable runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback to make
+ * sure the device is put into full power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ callback = rpm_get_resume_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto out;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+out:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
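pm_runtime_force_suspend() and pm_runtime_force_resume() let simple drivers reuse their runtime PM callbacks for system sleep instead of duplicating them. A minimal sketch, with hypothetical foo_runtime_* callbacks:

static int foo_runtime_suspend(struct device *dev)
{
        /* gate clocks, drop power */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* restore power, ungate clocks */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};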
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 03e089ade5ce..95b181d1ca6d 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
-static ssize_t pm_qos_latency_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+ return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
}
-static ssize_t pm_qos_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;
- ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+ ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+ value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_latency_show, pm_qos_latency_store);
+ pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+
+static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+ if (value < 0)
+ return sprintf(buf, "auto\n");
+ else if (value == PM_QOS_LATENCY_ANY)
+ return sprintf(buf, "any\n");
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value)) {
+ if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ value = PM_QOS_LATENCY_ANY;
+ }
+ ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
+ pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
-static struct attribute *pm_qos_latency_attrs[] = {
+static struct attribute *pm_qos_resume_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
-static struct attribute_group pm_qos_latency_attr_group = {
+static struct attribute_group pm_qos_resume_latency_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_latency_tolerance_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_latency_tolerance_attr_group = {
.name = power_group_name,
- .attrs = pm_qos_latency_attrs,
+ .attrs = pm_qos_latency_tolerance_attrs,
};
static struct attribute *pm_qos_flags_attrs[] = {
@@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
goto err_out;
}
-
if (device_can_wakeup(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
- if (rc) {
- if (pm_runtime_callbacks_present(dev))
- sysfs_unmerge_group(&dev->kobj,
- &pm_runtime_attr_group);
- goto err_out;
- }
+ if (rc)
+ goto err_runtime;
+ }
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+ if (rc)
+ goto err_wakeup;
}
return 0;
+ err_wakeup:
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
err_out:
sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
@@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-int pm_qos_sysfs_add_latency(struct device *dev)
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
-void pm_qos_sysfs_remove_latency(struct device *dev)
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
int pm_qos_sysfs_add_flags(struct device *dev)
@@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 33414b1de201..7d1326985bee 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -134,6 +134,8 @@ struct regmap {
/* if set, converts bulk rw to single rw */
bool use_single_rw;
+ /* if set, the device supports multi write mode */
+ bool can_multi_write;
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d4dd77134814..29b4128da0b0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -249,11 +249,12 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
{
unsigned int reg;
- for (reg = min; reg <= max; reg++) {
+ for (reg = min; reg <= max; reg += map->reg_stride) {
unsigned int val;
int ret;
- if (regmap_volatile(map, reg))
+ if (regmap_volatile(map, reg) ||
+ !regmap_writeable(map, reg))
continue;
ret = regcache_read(map, reg, &val);
@@ -312,10 +313,6 @@ int regcache_sync(struct regmap *map)
/* Apply any patch first */
map->cache_bypass = 1;
for (i = 0; i < map->patch_regs; i++) {
- if (map->patch[i].reg % map->reg_stride) {
- ret = -EINVAL;
- goto out;
- }
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
@@ -636,10 +633,10 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
if (*data == NULL)
return 0;
- count = cur - base;
+ count = (cur - base) / map->reg_stride;
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
- count * val_bytes, count, base, cur - 1);
+ count * val_bytes, count, base, cur - map->reg_stride);
map->cache_bypass = 1;
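The stride fixes above matter for any map with reg_stride > 1: previously regcache_default_sync() visited the nonexistent in-between addresses and the raw flush path miscounted registers. A config sketch for a device whose 32-bit registers sit on 4-byte boundaries:

static const struct regmap_config foo_regmap_config = {
        .reg_bits       = 32,
        .val_bits       = 32,
        .reg_stride     = 4,    /* only every 4th address is a register */
        .max_register   = 0x100,
        .cache_type     = REGCACHE_RBTREE,
};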
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index c5471cd6ebb7..45d812c0ea77 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -511,7 +511,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_file("range", 0400, map->debugfs,
map, &regmap_reg_ranges_fops);
- if (map->max_register) {
+ if (map->max_register || regmap_readable(map, 0)) {
debugfs_create_file("registers", 0400, map->debugfs,
map, &regmap_map_fops);
debugfs_create_file("access", 0400, map->debugfs,
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index fa6bf5279d28..ebd189529760 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/init.h>
static int regmap_i2c_write(void *context, const void *data, size_t count)
{
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 82692068d3cb..edf88f20cbce 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -368,8 +368,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d)
return -ENOMEM;
- *data = d;
-
d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
GFP_KERNEL);
if (!d->status_buf)
@@ -506,6 +504,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_domain;
}
+ *data = d;
+
return 0;
err_domain:
@@ -533,7 +533,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
return;
free_irq(irq, d);
- /* We should unmap the domain but... */
+ irq_domain_remove(d->domain);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
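Since *data is now set only on complete success and the irq_domain is freed on teardown, the usual driver pairing needs no extra cleanup of its own. A hedged sketch; foo_irq_chip is hypothetical and error handling is trimmed:

	struct regmap_irq_chip_data *irq_data;
	int ret;

	ret = regmap_add_irq_chip(map, client->irq, IRQF_ONESHOT,
				  0, &foo_irq_chip, &irq_data);
	if (ret)
		return ret;	/* irq_data was never set on failure */

	/* ... */

	regmap_del_irq_chip(client->irq, irq_data);	/* now also frees the domain */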
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 81f977510775..1e03e7f8bacb 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -18,7 +18,6 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -26,10 +25,47 @@
struct regmap_mmio_context {
void __iomem *regs;
+ unsigned reg_bytes;
unsigned val_bytes;
+ unsigned pad_bytes;
struct clk *clk;
};
+static inline void regmap_mmio_regsize_check(size_t reg_size)
+{
+ switch (reg_size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef CONFIG_64BIT
+ case 8:
+#endif
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+ switch (reg_bits) {
+ case 8:
+ case 16:
+ case 32:
+#ifdef CONFIG_64BIT
+ case 64:
+#endif
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline void regmap_mmio_count_check(size_t count)
+{
+ BUG_ON(count % 2 != 0);
+}
+
static int regmap_mmio_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
@@ -38,7 +74,7 @@ static int regmap_mmio_gather_write(void *context,
u32 offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
@@ -81,9 +117,13 @@ static int regmap_mmio_gather_write(void *context,
static int regmap_mmio_write(void *context, const void *data, size_t count)
{
- BUG_ON(count < 4);
+ struct regmap_mmio_context *ctx = context;
+ u32 offset = ctx->reg_bytes + ctx->pad_bytes;
+
+ regmap_mmio_count_check(count);
- return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
+ return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
+ data + offset, count - offset);
}
static int regmap_mmio_read(void *context,
@@ -94,7 +134,7 @@ static int regmap_mmio_read(void *context,
u32 offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
@@ -165,8 +205,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
int min_stride;
int ret;
- if (config->reg_bits != 32)
- return ERR_PTR(-EINVAL);
+ ret = regmap_mmio_regbits_check(config->reg_bits);
+ if (ret)
+ return ERR_PTR(ret);
if (config->pad_bits)
return ERR_PTR(-EINVAL);
@@ -209,6 +250,8 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ ctx->reg_bytes = config->reg_bits / 8;
+ ctx->pad_bytes = config->pad_bits / 8;
ctx->clk = ERR_PTR(-ENODEV);
if (clk_id == NULL)
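With the hard-coded reg_bits == 32 check gone, byte-aligned register widths of 8, 16 and 32 (plus 64 on 64-bit kernels) are accepted. A sketch of a configuration that was rejected before this patch, assuming a hypothetical device with 16-bit register offsets:

	static const struct regmap_config foo_mmio_config = {
		.reg_bits	= 16,		/* previously had to be 32 */
		.val_bits	= 32,
		.reg_stride	= 4,
	};

	map = devm_regmap_init_mmio(&pdev->dev, base, &foo_mmio_config);
	if (IS_ERR(map))
		return PTR_ERR(map);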
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 37f12ae7aada..0eb3097c0d76 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -12,7 +12,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include <linux/init.h>
#include <linux/module.h>
#include "internal.h"
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index ac2391013db1..d7026dc33388 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -22,69 +22,235 @@
#include <linux/module.h>
#include <linux/init.h>
-static int regmap_spmi_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int regmap_spmi_base_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ while (val_size-- && !err)
+ err = spmi_register_read(context, addr++, val++);
+
+ return err;
+}
+
+static int regmap_spmi_base_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ const u8 *data = val;
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ /*
+ * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence;
+ * use it when possible.
+ */
+ if (addr == 0 && val_size) {
+ err = spmi_register_zero_write(context, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+ while (val_size) {
+ err = spmi_register_write(context, addr, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_base_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 1);
+ return regmap_spmi_base_gather_write(context, data, 1, data + 1,
+ count - 1);
+}
+
+static struct regmap_bus regmap_spmi_base = {
+ .read = regmap_spmi_base_read,
+ .write = regmap_spmi_base_write,
+ .gather_write = regmap_spmi_base_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+
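Usage from an SPMI slave driver: the Base space is 8-bit address, 8-bit data, so a matching config might look like this (illustrative values, hypothetical "pmic" name):

	static const struct regmap_config pmic_base_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0xff,
	};

	map = devm_regmap_init_spmi_base(sdev, &pmic_base_config);
	if (IS_ERR(map))
		return PTR_ERR(map);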
+static int regmap_spmi_ext_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
BUG_ON(reg_size != 2);
- return spmi_ext_register_readl(context, *(u16 *)reg,
- val, val_size);
+
+ addr = *(u16 *)reg;
+
+ /*
+ * Split accesses into two to take advantage of the more
+	 * bandwidth-efficient 'Extended Register Read' command when possible.
+ */
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_read(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+		err = spmi_ext_register_readl(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
}
-static int regmap_spmi_gather_write(void *context,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
+static int regmap_spmi_ext_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
BUG_ON(reg_size != 2);
- return spmi_ext_register_writel(context, *(u16 *)reg, val, val_size);
+
+ addr = *(u16 *)reg;
+
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_write(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_writel(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
}
-static int regmap_spmi_write(void *context, const void *data,
- size_t count)
+static int regmap_spmi_ext_write(void *context, const void *data,
+ size_t count)
{
BUG_ON(count < 2);
- return regmap_spmi_gather_write(context, data, 2, data + 2, count - 2);
+ return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
+ count - 2);
}
-static struct regmap_bus regmap_spmi = {
- .read = regmap_spmi_read,
- .write = regmap_spmi_write,
- .gather_write = regmap_spmi_gather_write,
+static struct regmap_bus regmap_spmi_ext = {
+ .read = regmap_spmi_ext_read,
+ .write = regmap_spmi_ext_write,
+ .gather_write = regmap_spmi_ext_gather_write,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
/**
- * regmap_init_spmi(): Initialize register map
- *
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-struct regmap *regmap_init_spmi(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config)
{
- return regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+ return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi);
+EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
/**
- * devm_regmap_init_spmi(): Initialise managed register map
- *
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-struct regmap *devm_regmap_init_spmi(struct spmi_device *sdev,
+struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
const struct regmap_config *config)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi);
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
MODULE_LICENSE("GPL");
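The Extended space is addressed with 16-bit registers, so the companion config differs only in reg_bits/max_register (again illustrative):

	static const struct regmap_config pmic_ext_config = {
		.reg_bits	= 16,
		.val_bits	= 8,
		.max_register	= 0xffff,
	};

	map = devm_regmap_init_spmi_ext(sdev, &pmic_ext_config);
	if (IS_ERR(map))
		return PTR_ERR(map);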
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6a19515f8a45..d0a072463a04 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -380,6 +380,28 @@ static void regmap_range_exit(struct regmap *map)
kfree(map->selector_work_buf);
}
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config)
+{
+ struct regmap **m;
+
+ map->dev = dev;
+
+ regmap_debugfs_init(map, config->name);
+
+ /* Add a devres resource for dev_get_regmap() */
+ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+ if (!m) {
+ regmap_debugfs_exit(map);
+ return -ENOMEM;
+ }
+ *m = map;
+ devres_add(dev, m);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
+
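The split lets a regmap be created before its struct device exists (regmap_init() now tolerates dev == NULL, per the hunk below) and be attached once the device probes, syscon-style. A hedged sketch, assuming an MMIO map with no clock:

	/* early boot: no struct device is available yet */
	map = regmap_init_mmio(NULL, base, &early_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* later, when the matching platform device probes */
	ret = regmap_attach_dev(&pdev->dev, map, &early_config);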
/**
* regmap_init(): Initialise register map
*
@@ -397,7 +419,7 @@ struct regmap *regmap_init(struct device *dev,
void *bus_context,
const struct regmap_config *config)
{
- struct regmap *map, **m;
+ struct regmap *map;
int ret = -EINVAL;
enum regmap_endian reg_endian, val_endian;
int i, j;
@@ -439,6 +461,7 @@ struct regmap *regmap_init(struct device *dev,
else
map->reg_stride = 1;
map->use_single_rw = config->use_single_rw;
+ map->can_multi_write = config->can_multi_write;
map->dev = dev;
map->bus = bus;
map->bus_context = bus_context;
@@ -718,7 +741,7 @@ skip_format_initialization:
new->window_start = range_cfg->window_start;
new->window_len = range_cfg->window_len;
- if (_regmap_range_add(map, new) == false) {
+ if (!_regmap_range_add(map, new)) {
dev_err(map->dev, "Failed to add range %d\n", i);
kfree(new);
goto err_range;
@@ -734,25 +757,18 @@ skip_format_initialization:
}
}
- regmap_debugfs_init(map, config->name);
-
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
- /* Add a devres resource for dev_get_regmap() */
- m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
- if (!m) {
- ret = -ENOMEM;
- goto err_debugfs;
- }
- *m = map;
- devres_add(dev, m);
+ if (dev)
+ ret = regmap_attach_dev(dev, map, config);
+ if (ret != 0)
+ goto err_regcache;
return map;
-err_debugfs:
- regmap_debugfs_exit(map);
+err_regcache:
regcache_exit(map);
err_range:
regmap_range_exit(map);
@@ -1520,12 +1536,12 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map->lock_arg);
/*
* Some devices don't support bulk write, for
* them we have a series of single write operations.
*/
if (!map->bus || map->use_single_rw) {
+ map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -1554,31 +1570,239 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (ret != 0)
goto out;
}
+out:
+ map->unlock(map->lock_arg);
} else {
void *wval;
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
if (!wval) {
- ret = -ENOMEM;
dev_err(map->dev, "Error in memory allocation\n");
- goto out;
+ return -ENOMEM;
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(wval + i);
+ map->lock(map->lock_arg);
ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ map->unlock(map->lock_arg);
kfree(wval);
}
-out:
- map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * The (register, value) pairs in regs have not been formatted, but
+ * they are all in the same page and have been converted to page-relative
+ * addresses. The page register has already been written if that was
+ * necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ void *buf;
+ int i;
+ u8 *u8;
+ size_t val_bytes = map->format.val_bytes;
+ size_t reg_bytes = map->format.reg_bytes;
+ size_t pad_bytes = map->format.pad_bytes;
+ size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+ size_t len = pair_size * num_regs;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* We have to linearise by hand. */
+
+ u8 = buf;
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ int val = regs[i].def;
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+ map->format.format_reg(u8, reg, map->reg_shift);
+ u8 += reg_bytes + pad_bytes;
+ map->format.format_val(u8, val, 0);
+ u8 += val_bytes;
+ }
+ u8 = buf;
+ *u8 |= map->write_flag_mask;
+
+ ret = map->bus->write(map->bus_context, buf, len);
+
+ kfree(buf);
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ trace_regmap_hw_write_done(map->dev, reg, 1);
+ }
+ return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+ unsigned int reg,
+ struct regmap_range_node *range)
+{
+ unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+ return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+ struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ int i, n;
+ struct reg_default *base;
+ unsigned int this_page;
+ /*
+	 * The set of registers is not necessarily in order, but since the
+	 * write order must be preserved, this algorithm splits the set each
+	 * time the page changes.
+ */
+ base = regs;
+ for (i = 0, n = 0; i < num_regs; i++, n++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ unsigned int win_page = _regmap_register_page(map, reg,
+ range);
+
+ if (i == 0)
+ this_page = win_page;
+ if (win_page != this_page) {
+ this_page = win_page;
+ ret = _regmap_raw_multi_reg_write(map, base, n);
+ if (ret != 0)
+ return ret;
+ base += n;
+ n = 0;
+ }
+ ret = _regmap_select_page(map, &base[n].reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+ }
+ if (n > 0)
+ return _regmap_raw_multi_reg_write(map, base, n);
+ return 0;
+}
+
+static int _regmap_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int i;
+ int ret;
+
+ if (!map->can_multi_write) {
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+ }
+
+ if (!map->format.parse_inplace)
+ return -EINVAL;
+
+ if (map->writeable_reg)
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ if (!map->writeable_reg(map->dev, reg))
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+ }
+
+ if (!map->cache_bypass) {
+ for (i = 0; i < num_regs; i++) {
+ unsigned int val = regs[i].def;
+ unsigned int reg = regs[i].reg;
+ ret = regcache_write(map, reg, val);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+ reg, ret);
+ return ret;
+ }
+ }
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
+ WARN_ON(!map->bus);
+
+ for (i = 0; i < num_regs; i++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ size_t len = sizeof(struct reg_default)*num_regs;
+ struct reg_default *base = kmemdup(regs, len,
+ GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+ ret = _regmap_range_multi_paged_reg_write(map, base,
+ num_regs);
+ kfree(base);
+
+ return ret;
+ }
+ }
+ return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
+/*
* regmap_multi_reg_write(): Write multiple registers to the device
*
+ * where the set of register,value pairs are supplied in any order,
+ * possibly not all in a single range.
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * The 'normal' block write mode will ultimately send data on the
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers
+ * are addressed. However, this alternative block multi write mode will
+ * send the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target
+ * device must of course support the mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+ int num_regs)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+
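A usage sketch with made-up register/value pairs; the same array shape feeds regmap_multi_reg_write_bypassed() below when the cache must be left untouched:

	static const struct reg_default foo_seq[] = {
		{ 0x12, 0x0001 },	/* hypothetical register/value pairs; */
		{ 0x30, 0x8000 },	/* written in exactly this order      */
		{ 0x13, 0x00ff },
	};

	ret = regmap_multi_reg_write(map, foo_seq, ARRAY_SIZE(foo_seq));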
+/*
+ * regmap_multi_reg_write_bypassed(): Write multiple registers to the
+ * device but not the cache
+ *
 * where the set of registers is supplied in any order
*
* @map: Register map to write to
@@ -1592,30 +1816,27 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
- int num_regs)
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_default *regs,
+ int num_regs)
{
- int ret = 0, i;
-
- for (i = 0; i < num_regs; i++) {
- int reg = regs[i].reg;
- if (reg % map->reg_stride)
- return -EINVAL;
- }
+ int ret;
+ bool bypass;
map->lock(map->lock_arg);
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0)
- goto out;
- }
-out:
+ bypass = map->cache_bypass;
+ map->cache_bypass = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->cache_bypass = bypass;
+
map->unlock(map->lock_arg);
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
/**
* regmap_raw_write_async(): Write raw values to one or more registers
@@ -1736,6 +1957,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
if (map->cache_only)
return -EBUSY;
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
ret = map->reg_read(context, reg, val);
if (ret == 0) {
#ifdef LOG_DEVICE
@@ -1966,9 +2190,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
if (tmp != orig) {
ret = _regmap_write(map, reg, tmp);
- *change = true;
+ if (change)
+ *change = true;
} else {
- *change = false;
+ if (change)
+ *change = false;
}
return ret;
@@ -1987,11 +2213,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
- bool change;
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, &change);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
map->unlock(map->lock_arg);
return ret;
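Callers that still care whether the value actually changed use regmap_update_bits_check(), which keeps passing a real pointer; plain regmap_update_bits() simply forgoes the report. A sketch with hypothetical REG_CTRL/CTRL_EN definitions:

	bool changed;

	ret = regmap_update_bits_check(map, REG_CTRL, CTRL_EN, CTRL_EN, &changed);
	/* ... */
	ret = regmap_update_bits(map, REG_CTRL, CTRL_EN, 0);	/* no change report */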
@@ -2016,14 +2241,13 @@ EXPORT_SYMBOL_GPL(regmap_update_bits);
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
- bool change;
int ret;
map->lock(map->lock_arg);
map->async = true;
- ret = _regmap_update_bits(map, reg, mask, val, &change);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
map->async = false;
@@ -2173,35 +2397,21 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
* apply them immediately. Typically this is used to apply
* corrections to be applied to the device defaults on startup, such
* as the updates some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
*/
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
int num_regs)
{
struct reg_default *p;
- int i, ret;
+ int ret;
bool bypass;
if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
num_regs))
return 0;
- map->lock(map->lock_arg);
-
- bypass = map->cache_bypass;
-
- map->cache_bypass = true;
- map->async = true;
-
- /* Write out first; it's useful to apply even if we fail later. */
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0) {
- dev_err(map->dev, "Failed to write %x = %x: %d\n",
- regs[i].reg, regs[i].def, ret);
- goto out;
- }
- }
-
p = krealloc(map->patch,
sizeof(struct reg_default) * (map->patch_regs + num_regs),
GFP_KERNEL);
@@ -2210,9 +2420,20 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
map->patch = p;
map->patch_regs += num_regs;
} else {
- ret = -ENOMEM;
+ return -ENOMEM;
}
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+ map->async = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+ if (ret != 0)
+ goto out;
+
out:
map->async = false;
map->cache_bypass = bypass;
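From the caller's side nothing changes; the patch writes are merely funnelled through _regmap_multi_reg_write() and may be coalesced on buses that allow it. A sketch with made-up errata values:

	static const struct reg_default foo_errata[] = {
		{ 0x41, 0x0a00 },	/* hypothetical undocumented-register fixups */
		{ 0x53, 0x0003 },
	};

	ret = regmap_register_patch(map, foo_errata, ARRAY_SIZE(foo_errata));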
@@ -2240,6 +2461,18 @@ int regmap_get_val_bytes(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ *val = map->format.parse_val(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
+
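A sketch of the intended use: decoding one element of a buffer previously filled by regmap_raw_read(), honouring the map's value endianness (register address and buffer size are illustrative):

	u8 raw[2];
	unsigned int val;

	ret = regmap_raw_read(map, 0x10, raw, sizeof(raw));
	if (!ret)
		ret = regmap_parse_val(map, raw, &val);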
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 94ffee378f10..ad9d17762664 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/module.h>