Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig | 11
-rw-r--r--  drivers/base/Makefile | 1
-rw-r--r--  drivers/base/arch_numa.c | 68
-rw-r--r--  drivers/base/arch_topology.c | 178
-rw-r--r--  drivers/base/auxiliary.c | 152
-rw-r--r--  drivers/base/base.h | 4
-rw-r--r--  drivers/base/bus.c | 8
-rw-r--r--  drivers/base/cacheinfo.c | 145
-rw-r--r--  drivers/base/class.c | 4
-rw-r--r--  drivers/base/component.c | 300
-rw-r--r--  drivers/base/core.c | 192
-rw-r--r--  drivers/base/cpu.c | 18
-rw-r--r--  drivers/base/dd.c | 179
-rw-r--r--  drivers/base/devcoredump.c | 83
-rw-r--r--  drivers/base/devres.c | 8
-rw-r--r--  drivers/base/devtmpfs.c | 22
-rw-r--r--  drivers/base/driver.c | 76
-rw-r--r--  drivers/base/firmware_loader/Kconfig | 46
-rw-r--r--  drivers/base/firmware_loader/Makefile | 2
-rw-r--r--  drivers/base/firmware_loader/builtin/Makefile | 4
-rw-r--r--  drivers/base/firmware_loader/fallback.c | 425
-rw-r--r--  drivers/base/firmware_loader/fallback.h | 35
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c | 25
-rw-r--r--  drivers/base/firmware_loader/firmware.h | 16
-rw-r--r--  drivers/base/firmware_loader/main.c | 115
-rw-r--r--  drivers/base/firmware_loader/sysfs.c | 419
-rw-r--r--  drivers/base/firmware_loader/sysfs.h | 122
-rw-r--r--  drivers/base/firmware_loader/sysfs_upload.c | 407
-rw-r--r--  drivers/base/firmware_loader/sysfs_upload.h | 41
-rw-r--r--  drivers/base/init.c | 3
-rw-r--r--  drivers/base/memory.c | 160
-rw-r--r--  drivers/base/node.c | 184
-rw-r--r--  drivers/base/physical_location.c | 143
-rw-r--r--  drivers/base/physical_location.h | 16
-rw-r--r--  drivers/base/platform-msi.c | 235
-rw-r--r--  drivers/base/platform.c | 70
-rw-r--r--  drivers/base/power/common.c | 8
-rw-r--r--  drivers/base/power/domain.c | 329
-rw-r--r--  drivers/base/power/domain_governor.c | 65
-rw-r--r--  drivers/base/power/main.c | 16
-rw-r--r--  drivers/base/power/runtime.c | 105
-rw-r--r--  drivers/base/power/trace.c | 6
-rw-r--r--  drivers/base/power/wakeirq.c | 2
-rw-r--r--  drivers/base/power/wakeup.c | 78
-rw-r--r--  drivers/base/property.c | 355
-rw-r--r--  drivers/base/regmap/internal.h | 6
-rw-r--r--  drivers/base/regmap/regcache.c | 15
-rw-r--r--  drivers/base/regmap/regmap-i3c.c | 2
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 466
-rw-r--r--  drivers/base/regmap/regmap-mmio.c | 289
-rw-r--r--  drivers/base/regmap/regmap-sccb.c | 2
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c | 2
-rw-r--r--  drivers/base/regmap/regmap-sdw.c | 2
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c | 2
-rw-r--r--  drivers/base/regmap/regmap-spi-avmm.c | 14
-rw-r--r--  drivers/base/regmap/regmap-spi.c | 8
-rw-r--r--  drivers/base/regmap/regmap-w1.c | 6
-rw-r--r--  drivers/base/regmap/regmap.c | 288
-rw-r--r--  drivers/base/regmap/trace.h | 61
-rw-r--r--  drivers/base/soc.c | 14
-rw-r--r--  drivers/base/test/test_async_driver_probe.c | 14
-rw-r--r--  drivers/base/topology.c | 90
62 files changed, 4204 insertions, 1958 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ffcbe2bc460e..6f04b831a5c0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -62,6 +62,17 @@ config DEVTMPFS_MOUNT
rescue mode with init=/bin/sh, even when the /dev directory
on the rootfs is completely empty.
+config DEVTMPFS_SAFE
+ bool "Use nosuid,noexec mount options on devtmpfs"
+ depends on DEVTMPFS
+ help
+ This instructs the kernel to include the MS_NOEXEC and MS_NOSUID mount
+ flags when mounting devtmpfs.
+
+ Notice: If enabled, things like /dev/mem cannot be mmapped
+ with the PROT_EXEC flag. This can break, for example, non-KMS
+ video drivers.
+
config STANDALONE
bool "Select only drivers that don't need compile-time external firmware"
default y
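
The practical effect of the new DEVTMPFS_SAFE option can be observed from userspace: on a filesystem mounted noexec, mmap() with PROT_EXEC fails with EPERM. A minimal sketch of such a check (hypothetical test program, not part of this patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/mem", O_RDONLY);    /* typically needs root */
            void *p;

            if (fd < 0) {
                    perror("open /dev/mem");
                    return 1;
            }

            /* With DEVTMPFS_SAFE (noexec devtmpfs) this fails with EPERM. */
            p = mmap(NULL, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
            if (p == MAP_FAILED)
                    perror("mmap(PROT_EXEC)");
            else
                    munmap(p, 4096);

            close(fd);
            return 0;
    }
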
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 02f7f1358e86..83217d243c25 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o
+obj-$(CONFIG_ACPI) += physical_location.o
obj-y += test/
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index bc1876915457..eaa31e567d1e 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <asm/sections.h>
-#include <asm/pgalloc.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
@@ -155,66 +154,6 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
- size_t align)
-{
- int nid = early_cpu_to_node(cpu);
-
- return memblock_alloc_try_nid(size, align,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- memblock_free(ptr, size);
-}
-
-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
-static void __init pcpu_populate_pte(unsigned long addr)
-{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- p4d_populate(&init_mm, p4d, new);
- }
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pud_populate(&init_mm, pud, new);
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pmd_populate_kernel(&init_mm, pmd, new);
- }
-
- return;
-
-err_alloc:
- panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-}
-#endif
-
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
@@ -229,7 +168,7 @@ void __init setup_per_cpu_areas(void)
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
+ early_cpu_to_node);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
@@ -239,10 +178,7 @@ void __init setup_per_cpu_areas(void)
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
- rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
- pcpu_fc_alloc,
- pcpu_fc_free,
- pcpu_populate_pte);
+ rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node);
#endif
if (rc < 0)
panic("Failed to initialize percpu areas (err=%d).", rc);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 976154140f0b..e7d6e6657ffa 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
@@ -19,6 +20,9 @@
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_pressure.h>
+
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
@@ -195,6 +199,8 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
th_pressure = max_capacity - capacity;
+ trace_thermal_pressure_update(cpu, th_pressure);
+
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
@@ -339,6 +345,46 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return !ret;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+
+void topology_init_cpu_capacity_cppc(void)
+{
+ struct cppc_perf_caps perf_caps;
+ int cpu;
+
+ if (likely(!acpi_cpc_valid()))
+ return;
+
+ raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ if (!cppc_get_perf_caps(cpu, &perf_caps) &&
+ (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
+ (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
+ raw_capacity[cpu] = perf_caps.highest_perf;
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
+ cpu, raw_capacity[cpu]);
+ continue;
+ }
+
+ pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ goto exit;
+ }
+
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ pr_debug("cpu_capacity: cpu_capacity initialization done\n");
+
+exit:
+ free_raw_capacity();
+}
+#endif
+
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
@@ -387,9 +433,8 @@ static int __init register_cpufreq_notifier(void)
int ret;
/*
- * on ACPI-based systems we need to use the default cpu capacity
- * until we have the necessary code to parse the cpu capacity, so
- * skip registering cpufreq notifier.
+ * On ACPI-based systems skip registering cpufreq notifier as cpufreq
+ * information is not needed for cpu capacity initialization.
*/
if (!acpi_disabled || !raw_capacity)
return -EINVAL;
@@ -452,7 +497,7 @@ static int __init get_cpu_for_node(struct device_node *node)
}
static int __init parse_core(struct device_node *core, int package_id,
- int core_id)
+ int cluster_id, int core_id)
{
char name[20];
bool leaf = true;
@@ -468,6 +513,7 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu = get_cpu_for_node(t);
if (cpu >= 0) {
cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else if (cpu != -ENODEV) {
@@ -489,6 +535,7 @@ static int __init parse_core(struct device_node *core, int package_id,
}
cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf && cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for leaf core\n", core);
@@ -498,13 +545,13 @@ static int __init parse_core(struct device_node *core, int package_id,
return 0;
}
-static int __init parse_cluster(struct device_node *cluster, int depth)
+static int __init parse_cluster(struct device_node *cluster, int package_id,
+ int cluster_id, int depth)
{
char name[20];
bool leaf = true;
bool has_cores = false;
struct device_node *c;
- static int package_id __initdata;
int core_id = 0;
int i, ret;
@@ -519,7 +566,9 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
c = of_get_child_by_name(cluster, name);
if (c) {
leaf = false;
- ret = parse_cluster(c, depth + 1);
+ ret = parse_cluster(c, package_id, i, depth + 1);
+ if (depth > 0)
+ pr_warn("Topology for clusters of clusters not yet supported\n");
of_node_put(c);
if (ret != 0)
return ret;
@@ -543,7 +592,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
}
if (leaf) {
- ret = parse_core(c, package_id, core_id++);
+ ret = parse_core(c, package_id, cluster_id,
+ core_id++);
} else {
pr_err("%pOF: Non-leaf cluster with core %s\n",
cluster, name);
@@ -560,10 +610,33 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
if (leaf && !has_cores)
pr_warn("%pOF: empty cluster\n", cluster);
- if (leaf)
+ return 0;
+}
+
+static int __init parse_socket(struct device_node *socket)
+{
+ char name[20];
+ struct device_node *c;
+ bool has_socket = false;
+ int package_id = 0, ret;
+
+ do {
+ snprintf(name, sizeof(name), "socket%d", package_id);
+ c = of_get_child_by_name(socket, name);
+ if (c) {
+ has_socket = true;
+ ret = parse_cluster(c, package_id, -1, 0);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
package_id++;
+ } while (c);
- return 0;
+ if (!has_socket)
+ ret = parse_cluster(socket, 0, -1, 0);
+
+ return ret;
}
static int __init parse_dt_topology(void)
@@ -586,7 +659,7 @@ static int __init parse_dt_topology(void)
if (!map)
goto out;
- ret = parse_cluster(map, 0);
+ ret = parse_socket(map);
if (ret != 0)
goto out_map;
@@ -597,8 +670,10 @@ static int __init parse_dt_topology(void)
* only mark cores described in the DT as possible.
*/
for_each_possible_cpu(cpu)
- if (cpu_topology[cpu].package_id == -1)
+ if (cpu_topology[cpu].package_id < 0) {
ret = -EINVAL;
+ break;
+ }
out_map:
of_node_put(map);
@@ -623,29 +698,51 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
/* not numa in package, lets use the package siblings */
core_mask = &cpu_topology[cpu].core_sibling;
}
- if (cpu_topology[cpu].llc_id != -1) {
+
+ if (last_level_cache_is_valid(cpu)) {
if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
core_mask = &cpu_topology[cpu].llc_sibling;
}
+ /*
+ * For systems with no shared cpu-side LLC but with clusters defined,
+ * extend core_mask to cluster_siblings. The sched domain builder will
+ * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
+ */
+ if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+ cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
+ core_mask = &cpu_topology[cpu].cluster_sibling;
+
return core_mask;
}
const struct cpumask *cpu_clustergroup_mask(int cpu)
{
+ /*
+ * Forbid cpu_clustergroup_mask() to span more or the same CPUs as
+ * cpu_coregroup_mask().
+ */
+ if (cpumask_subset(cpu_coregroup_mask(cpu),
+ &cpu_topology[cpu].cluster_sibling))
+ return topology_sibling_cpumask(cpu);
+
return &cpu_topology[cpu].cluster_sibling;
}
void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
- int cpu;
+ int cpu, ret;
+
+ ret = detect_cache_attributes(cpuid);
+ if (ret && ret != -ENOENT)
+ pr_info("Early cacheinfo failed, ret = %d\n", ret);
/* update core and thread sibling masks */
for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+ if (last_level_cache_is_shared(cpu, cpuid)) {
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
@@ -653,15 +750,17 @@ void update_siblings_masks(unsigned int cpuid)
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
- if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
- cpuid_topo->cluster_id != -1) {
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+ continue;
+
+ if (cpuid_topo->cluster_id >= 0) {
cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
}
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
if (cpuid_topo->core_id != cpu_topo->core_id)
continue;
@@ -697,7 +796,6 @@ void __init reset_cpu_topology(void)
cpu_topo->core_id = -1;
cpu_topo->cluster_id = -1;
cpu_topo->package_id = -1;
- cpu_topo->llc_id = -1;
clear_cpu_topology(cpu);
}
@@ -727,15 +825,39 @@ __weak int __init parse_acpi_topology(void)
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
+ int ret;
+
reset_cpu_topology();
+ ret = parse_acpi_topology();
+ if (!ret)
+ ret = of_have_populated_dt() && parse_dt_topology();
- /*
- * Discard anything that was parsed if we hit an error so we
- * don't use partial information.
- */
- if (parse_acpi_topology())
- reset_cpu_topology();
- else if (of_have_populated_dt() && parse_dt_topology())
+ if (ret) {
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
reset_cpu_topology();
+ return;
+ }
+}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+ if (cpuid_topo->package_id != -1)
+ goto topology_populated;
+
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
+
+ pr_debug("CPU%u: package %d core %d thread %d\n",
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+ cpuid_topo->thread_id);
+
+topology_populated:
+ update_siblings_masks(cpuid);
}
#endif
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 9230c9472bb0..8c5e65930617 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -17,6 +17,147 @@
#include <linux/auxiliary_bus.h>
#include "base.h"
+/**
+ * DOC: PURPOSE
+ *
+ * In some subsystems, the functionality of the core device (PCI/ACPI/other) is
+ * too complex for a single device to be managed by a monolithic driver (e.g.
+ * Sound Open Firmware), multiple devices might implement a common intersection
+ * of functionality (e.g. NICs + RDMA), or a driver may want to export an
+ * interface for another subsystem to drive (e.g. SIOV Physical Function export
+ * Virtual Function management). A split of the functionality into child-
+ * devices representing sub-domains of functionality makes it possible to
+ * compartmentalize, layer, and distribute domain-specific concerns via a Linux
+ * device-driver model.
+ *
+ * An example for this kind of requirement is the audio subsystem where a
+ * single IP is handling multiple entities such as HDMI, Soundwire, local
+ * devices such as mics/speakers etc. The split for the core's functionality
+ * can be arbitrary or be defined by the DSP firmware topology and include
+ * hooks for test/debug. This allows for the audio core device to be minimal
+ * and focused on hardware-specific control and communication.
+ *
+ * Each auxiliary_device represents a part of its parent functionality. The
+ * generic behavior can be extended and specialized as needed by encapsulating
+ * an auxiliary_device within other domain-specific structures and the use of
+ * .ops callbacks. Devices on the auxiliary bus do not share any structures and
+ * the use of a communication channel with the parent is domain-specific.
+ *
+ * Note that ops are intended as a way to augment instance behavior within a
+ * class of auxiliary devices; they are not the mechanism for exporting common
+ * infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
+ * infrastructure from the parent module to the auxiliary module(s).
+ */
+
+/**
+ * DOC: USAGE
+ *
+ * The auxiliary bus is to be used when a driver and one or more kernel
+ * modules, who share a common header file with the driver, need a mechanism to
+ * connect and provide access to a shared object allocated by the
+ * auxiliary_device's registering driver. The registering driver for the
+ * auxiliary_device(s) and the kernel module(s) registering auxiliary_drivers
+ * can be from the same subsystem, or from multiple subsystems.
+ *
+ * The emphasis here is on a common generic interface that keeps subsystem
+ * customization out of the bus infrastructure.
+ *
+ * One example is a PCI network device that is RDMA-capable and exports a child
+ * device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
+ * driver allocates and registers an auxiliary_device for each physical
+ * function on the NIC. The RDMA driver registers an auxiliary_driver that
+ * claims each of these auxiliary_devices. This conveys data/ops published by
+ * the parent PCI device/driver to the RDMA auxiliary_driver.
+ *
+ * Another use case is for the PCI device to be split out into multiple sub
+ * functions. For each sub function an auxiliary_device is created. A PCI sub
+ * function driver binds to such a device and creates one or more class
+ * devices. A PCI sub function auxiliary device is likely to be contained in a
+ * struct with additional attributes such as user defined sub function number
+ * and optional attributes such as resources and a link to the parent device.
+ * These attributes could be used by systemd/udev and hence should be
+ * initialized before a driver binds to an auxiliary_device.
+ *
+ * A key requirement for utilizing the auxiliary bus is that there is no
+ * dependency on a physical bus, device, register accesses or regmap support.
+ * These individual devices split from the core cannot live on the platform bus
+ * as they are not physical devices that are controlled by DT/ACPI. The same
+ * argument applies for not using MFD in this scenario as MFD relies on
+ * individual function devices being physical devices.
+ */
+
+/**
+ * DOC: EXAMPLE
+ *
+ * Auxiliary devices are created and registered by a subsystem-level core
+ * device that needs to break up its functionality into smaller fragments. One
+ * way to extend the scope of an auxiliary_device is to encapsulate it within a
+ * domain-specific structure defined by the parent device. This structure
+ * contains the auxiliary_device and any associated shared data/callbacks
+ * needed to establish the connection with the parent.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * struct auxiliary_device auxdev;
+ * void (*connect)(struct auxiliary_device *auxdev);
+ * void (*disconnect)(struct auxiliary_device *auxdev);
+ * void *data;
+ * };
+ *
+ * The parent device then registers the auxiliary_device by calling
+ * auxiliary_device_init(), and then auxiliary_device_add(), with the pointer
+ * to the auxdev member of the above structure. The parent provides a name for
+ * the auxiliary_device that, combined with the parent's KBUILD_MODNAME,
+ * creates a match_name that is used for matching and binding with a driver.
+ *
+ * Whenever an auxiliary_driver is registered, based on the match_name, the
+ * auxiliary_driver's probe() is invoked for the matching devices. The
+ * auxiliary_driver can also be encapsulated inside custom drivers that make
+ * the core device's functionality extensible by adding additional
+ * domain-specific ops as follows:
+ *
+ * .. code-block:: c
+ *
+ * struct my_ops {
+ * void (*send)(struct auxiliary_device *auxdev);
+ * void (*receive)(struct auxiliary_device *auxdev);
+ * };
+ *
+ *
+ * struct my_driver {
+ * struct auxiliary_driver auxiliary_drv;
+ * const struct my_ops ops;
+ * };
+ *
+ * An example of this type of usage is:
+ *
+ * .. code-block:: c
+ *
+ * const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * { },
+ * };
+ *
+ * const struct my_ops my_custom_ops = {
+ * .send = my_tx,
+ * .receive = my_rx,
+ * };
+ *
+ * const struct my_driver my_drv = {
+ * .auxiliary_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_probe,
+ * .remove = my_remove,
+ * .shutdown = my_shutdown,
+ * },
+ * .ops = my_custom_ops,
+ * };
+ */
+
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
@@ -117,7 +258,7 @@ static struct bus_type auxiliary_bus_type = {
* auxiliary_device_init - check auxiliary_device and initialize
* @auxdev: auxiliary device struct
*
- * This is the first step in the two-step process to register an
+ * This is the second step in the three-step process to register an
* auxiliary_device.
*
* When this function returns an error code, then the device_initialize will
@@ -155,7 +296,7 @@ EXPORT_SYMBOL_GPL(auxiliary_device_init);
* @auxdev: auxiliary bus device to add to the bus
* @modname: name of the parent device's driver module
*
- * This is the second step in the two-step process to register an
+ * This is the third step in the three-step process to register an
* auxiliary_device.
*
* This function must be called after a successful call to
@@ -202,6 +343,8 @@ EXPORT_SYMBOL_GPL(__auxiliary_device_add);
* This function returns a reference to a device that is 'found'
* for later use, as determined by the @match callback.
*
+ * The reference returned should be released with put_device().
+ *
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
@@ -225,6 +368,11 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
* @modname: KBUILD_MODNAME for parent driver
+ *
+ * The expectation is that users will call the "auxiliary_driver_register"
+ * macro so that the caller's KBUILD_MODNAME is automatically inserted for the
+ * modname parameter. Only if a user requires a custom name would this version
+ * be called directly.
*/
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
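
To make the three-step registration described in the kernel-doc above concrete, here is a minimal sketch of a parent driver registering one auxiliary device; the foo names are hypothetical, while auxiliary_device_init(), auxiliary_device_add() and auxiliary_device_uninit() are the real API:

    #include <linux/auxiliary_bus.h>
    #include <linux/slab.h>

    struct foo {
            struct auxiliary_device auxdev;
    };

    static void foo_adev_release(struct device *dev)
    {
            struct auxiliary_device *auxdev = to_auxiliary_dev(dev);

            kfree(container_of(auxdev, struct foo, auxdev));
    }

    static int foo_register_adev(struct device *parent)
    {
            struct foo *f;
            int ret;

            f = kzalloc(sizeof(*f), GFP_KERNEL);
            if (!f)
                    return -ENOMEM;

            /* Step 1: fill in name, id, parent and release callback. */
            f->auxdev.name = "foo_dev";
            f->auxdev.id = 0;
            f->auxdev.dev.parent = parent;
            f->auxdev.dev.release = foo_adev_release;

            /* Step 2: on failure the caller still owns the memory. */
            ret = auxiliary_device_init(&f->auxdev);
            if (ret) {
                    kfree(f);
                    return ret;
            }

            /* Step 3: match_name becomes "<KBUILD_MODNAME>.foo_dev". */
            ret = auxiliary_device_add(&f->auxdev);
            if (ret)
                    auxiliary_device_uninit(&f->auxdev); /* triggers release */

            return ret;
    }
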
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2882af26392a..b902d1ecc247 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -154,11 +154,11 @@ extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
-extern char *make_class_name(const char *name, struct kobject *kobj);
-
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
+extern void deferred_probe_extend_timeout(void);
+extern void driver_deferred_probe_trigger(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index bdc98c5713d5..7ca47e5b3c1f 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -163,9 +163,9 @@ static struct kobj_type bus_ktype = {
.release = bus_release,
};
-static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int bus_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &bus_ktype)
return 1;
@@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv)
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
- goto out_unregister;
+ goto out_del_list;
}
module_add_driver(drv->owner, drv);
@@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
+out_del_list:
+ klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index dad296229161..4b5cd08c5a65 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -14,7 +14,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -25,19 +25,60 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
+#define per_cpu_cacheinfo_idx(cpu, idx) \
+ (per_cpu_cacheinfo(cpu) + (idx))
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
return ci_cacheinfo(cpu);
}
-#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
+ /*
+ * For non DT/ACPI systems, assume unique level 1 caches,
+ * system-wide shared caches for all other levels. This will be used
+ * only if arch specific code has not populated shared_cpu_map
+ */
+ if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
+ return !(this_leaf->level == 1);
+
+ if ((sib_leaf->attributes & CACHE_ID) &&
+ (this_leaf->attributes & CACHE_ID))
+ return sib_leaf->id == this_leaf->id;
+
return sib_leaf->fw_token == this_leaf->fw_token;
}
+bool last_level_cache_is_valid(unsigned int cpu)
+{
+ struct cacheinfo *llc;
+
+ if (!cache_leaves(cpu))
+ return false;
+
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ return (llc->attributes & CACHE_ID) || !!llc->fw_token;
+
+}
+
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
+{
+ struct cacheinfo *llc_x, *llc_y;
+
+ if (!last_level_cache_is_valid(cpu_x) ||
+ !last_level_cache_is_valid(cpu_y))
+ return false;
+
+ llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
+ llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);
+
+ return cache_leaves_are_shared(llc_x, llc_y);
+}
+
+#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
@@ -157,27 +198,16 @@ static int cache_setup_of_node(unsigned int cpu)
{
struct device_node *np;
struct cacheinfo *this_leaf;
- struct device *cpu_dev = get_cpu_device(cpu);
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
unsigned int index = 0;
- /* skip if fw_token is already populated */
- if (this_cpu_ci->info_list->fw_token) {
- return 0;
- }
-
- if (!cpu_dev) {
- pr_err("No cpu device for CPU %d\n", cpu);
- return -ENODEV;
- }
- np = cpu_dev->of_node;
+ np = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
return -ENOENT;
}
while (index < cache_leaves(cpu)) {
- this_leaf = this_cpu_ci->info_list + index;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1)
np = of_find_next_cache_node(np);
else
@@ -196,16 +226,6 @@ static int cache_setup_of_node(unsigned int cpu)
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
-static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
- struct cacheinfo *sib_leaf)
-{
- /*
- * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
- * shared caches for all other levels. This will be used only if
- * arch specific code has not populated shared_cpu_map
- */
- return !(this_leaf->level == 1);
-}
#endif
int __weak cache_setup_acpi(unsigned int cpu)
@@ -215,6 +235,18 @@ int __weak cache_setup_acpi(unsigned int cpu)
unsigned int coherency_max_size;
+static int cache_setup_properties(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ ret = cache_setup_acpi(cpu);
+
+ return ret;
+}
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -225,21 +257,21 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (this_cpu_ci->cpu_map_populated)
return 0;
- if (of_have_populated_dt())
- ret = cache_setup_of_node(cpu);
- else if (!acpi_disabled)
- ret = cache_setup_acpi(cpu);
-
- if (ret)
- return ret;
+ /*
+ * skip setting up cache properties if LLC is valid, just need
+ * to update the shared cpu_map if the cache attributes were
+ * populated early before all the cpus are brought online
+ */
+ if (!last_level_cache_is_valid(cpu)) {
+ ret = cache_setup_properties(cpu);
+ if (ret)
+ return ret;
+ }
for (index = 0; index < cache_leaves(cpu); index++) {
unsigned int i;
- this_leaf = this_cpu_ci->info_list + index;
- /* skip if shared_cpu_map is already populated */
- if (!cpumask_empty(&this_leaf->shared_cpu_map))
- continue;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
@@ -247,7 +279,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (i == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
- sib_leaf = sib_cpu_ci->info_list + index;
+
+ sib_leaf = per_cpu_cacheinfo_idx(i, index);
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -263,23 +296,19 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int sibling, index;
for (index = 0; index < cache_leaves(cpu); index++) {
- this_leaf = this_cpu_ci->info_list + index;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
- struct cpu_cacheinfo *sib_cpu_ci;
-
- if (sibling == cpu) /* skip itself */
- continue;
+ struct cpu_cacheinfo *sib_cpu_ci =
+ get_cpu_cacheinfo(sibling);
- sib_cpu_ci = get_cpu_cacheinfo(sibling);
- if (!sib_cpu_ci->info_list)
- continue;
+ if (sibling == cpu || !sib_cpu_ci->info_list)
+ continue;/* skip if itself or no cacheinfo */
- sib_leaf = sib_cpu_ci->info_list + index;
+ sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
}
@@ -310,17 +339,28 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static int detect_cache_attributes(unsigned int cpu)
+int detect_cache_attributes(unsigned int cpu)
{
int ret;
+ /* Since early detection of the cacheinfo is allowed via this
+ * function and this also gets called as CPU hotplug callbacks via
+ * cacheinfo_cpu_online, the initialisation can be skipped and only
+ * CPU maps can be updated as the CPU online status would be updated
+ * if called via the cacheinfo_cpu_online path.
+ */
+ if (per_cpu_cacheinfo(cpu))
+ goto update_cpu_map;
+
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_KERNEL);
- if (per_cpu_cacheinfo(cpu) == NULL)
+ sizeof(struct cacheinfo), GFP_ATOMIC);
+ if (per_cpu_cacheinfo(cpu) == NULL) {
+ cache_leaves(cpu) = 0;
return -ENOMEM;
+ }
/*
* populate_cache_leaves() may completely setup the cache leaves and
@@ -329,6 +369,8 @@ static int detect_cache_attributes(unsigned int cpu)
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
+
+update_cpu_map:
/*
* For systems using DT for cache hierarchy, fw_token
* and shared_cpu_map will be set up here only if they are
@@ -614,7 +656,6 @@ static int cache_add_dev(unsigned int cpu)
int rc;
struct device *ci_dev, *parent;
struct cacheinfo *this_leaf;
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
const struct attribute_group **cache_groups;
rc = cpu_cache_sysfs_init(cpu);
@@ -623,7 +664,7 @@ static int cache_add_dev(unsigned int cpu)
parent = per_cpu_cache_dev(cpu);
for (i = 0; i < cache_leaves(cpu); i++) {
- this_leaf = this_cpu_ci->info_list + i;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, i);
if (this_leaf->disable_sysfs)
continue;
if (this_leaf->type == CACHE_TYPE_NOCACHE)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7476f393df97..64f7b9a0970f 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -16,7 +16,7 @@
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "base.h"
@@ -260,7 +260,7 @@ EXPORT_SYMBOL_GPL(__class_create);
*/
void class_destroy(struct class *cls)
{
- if ((cls == NULL) || (IS_ERR(cls)))
+ if (IS_ERR_OR_NULL(cls))
return;
class_unregister(cls);
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 2d25a6416587..5eadeac6c532 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -1,16 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Componentized device handling.
- *
- * This is work in progress. We gather up the component devices into a list,
- * and bind them when instructed. At the moment, we're specific to the DRM
- * subsystem, and only handles one master device, but this doesn't have to be
- * the case.
*/
#include <linux/component.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
@@ -57,7 +53,7 @@ struct component_match {
struct component_match_array *compare;
};
-struct master {
+struct aggregate_device {
struct list_head node;
bool bound;
@@ -68,7 +64,7 @@ struct master {
struct component {
struct list_head node;
- struct master *master;
+ struct aggregate_device *adev;
bool bound;
const struct component_ops *ops;
@@ -78,7 +74,7 @@ struct component {
static DEFINE_MUTEX(component_mutex);
static LIST_HEAD(component_list);
-static LIST_HEAD(masters);
+static LIST_HEAD(aggregate_devices);
#ifdef CONFIG_DEBUG_FS
@@ -86,12 +82,12 @@ static struct dentry *component_debugfs_dir;
static int component_devices_show(struct seq_file *s, void *data)
{
- struct master *m = s->private;
+ struct aggregate_device *m = s->private;
struct component_match *match = m->match;
size_t i;
mutex_lock(&component_mutex);
- seq_printf(s, "%-40s %20s\n", "master name", "status");
+ seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
seq_printf(s, "%-40s %20s\n\n",
dev_name(m->parent), m->bound ? "bound" : "not bound");
@@ -121,46 +117,46 @@ static int __init component_debug_init(void)
core_initcall(component_debug_init);
-static void component_master_debugfs_add(struct master *m)
+static void component_debugfs_add(struct aggregate_device *m)
{
debugfs_create_file(dev_name(m->parent), 0444, component_debugfs_dir, m,
&component_devices_fops);
}
-static void component_master_debugfs_del(struct master *m)
+static void component_debugfs_del(struct aggregate_device *m)
{
debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir));
}
#else
-static void component_master_debugfs_add(struct master *m)
+static void component_debugfs_add(struct aggregate_device *m)
{ }
-static void component_master_debugfs_del(struct master *m)
+static void component_debugfs_del(struct aggregate_device *m)
{ }
#endif
-static struct master *__master_find(struct device *parent,
+static struct aggregate_device *__aggregate_find(struct device *parent,
const struct component_master_ops *ops)
{
- struct master *m;
+ struct aggregate_device *m;
- list_for_each_entry(m, &masters, node)
+ list_for_each_entry(m, &aggregate_devices, node)
if (m->parent == parent && (!ops || m->ops == ops))
return m;
return NULL;
}
-static struct component *find_component(struct master *master,
+static struct component *find_component(struct aggregate_device *adev,
struct component_match_array *mc)
{
struct component *c;
list_for_each_entry(c, &component_list, node) {
- if (c->master && c->master != master)
+ if (c->adev && c->adev != adev)
continue;
if (mc->compare && mc->compare(c->dev, mc->data))
@@ -174,102 +170,103 @@ static struct component *find_component(struct master *master,
return NULL;
}
-static int find_components(struct master *master)
+static int find_components(struct aggregate_device *adev)
{
- struct component_match *match = master->match;
+ struct component_match *match = adev->match;
size_t i;
int ret = 0;
/*
* Scan the array of match functions and attach
- * any components which are found to this master.
+ * any components which are found to this adev.
*/
for (i = 0; i < match->num; i++) {
struct component_match_array *mc = &match->compare[i];
struct component *c;
- dev_dbg(master->parent, "Looking for component %zu\n", i);
+ dev_dbg(adev->parent, "Looking for component %zu\n", i);
if (match->compare[i].component)
continue;
- c = find_component(master, mc);
+ c = find_component(adev, mc);
if (!c) {
ret = -ENXIO;
break;
}
- dev_dbg(master->parent, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master);
+ dev_dbg(adev->parent, "found component %s, duplicate %u\n",
+ dev_name(c->dev), !!c->adev);
- /* Attach this component to the master */
- match->compare[i].duplicate = !!c->master;
+ /* Attach this component to the adev */
+ match->compare[i].duplicate = !!c->adev;
match->compare[i].component = c;
- c->master = master;
+ c->adev = adev;
}
return ret;
}
-/* Detach component from associated master */
-static void remove_component(struct master *master, struct component *c)
+/* Detach component from associated aggregate_device */
+static void remove_component(struct aggregate_device *adev, struct component *c)
{
size_t i;
- /* Detach the component from this master. */
- for (i = 0; i < master->match->num; i++)
- if (master->match->compare[i].component == c)
- master->match->compare[i].component = NULL;
+ /* Detach the component from this adev. */
+ for (i = 0; i < adev->match->num; i++)
+ if (adev->match->compare[i].component == c)
+ adev->match->compare[i].component = NULL;
}
/*
- * Try to bring up a master. If component is NULL, we're interested in
- * this master, otherwise it's a component which must be present to try
- * and bring up the master.
+ * Try to bring up an aggregate device. If component is NULL, we're interested
+ * in this aggregate device, otherwise it's a component which must be present
+ * to try and bring up the aggregate device.
*
* Returns 1 for successful bringup, 0 if not ready, or -ve errno.
*/
-static int try_to_bring_up_master(struct master *master,
+static int try_to_bring_up_aggregate_device(struct aggregate_device *adev,
struct component *component)
{
int ret;
- dev_dbg(master->parent, "trying to bring up master\n");
+ dev_dbg(adev->parent, "trying to bring up adev\n");
- if (find_components(master)) {
- dev_dbg(master->parent, "master has incomplete components\n");
+ if (find_components(adev)) {
+ dev_dbg(adev->parent, "master has incomplete components\n");
return 0;
}
- if (component && component->master != master) {
- dev_dbg(master->parent, "master is not for this component (%s)\n",
+ if (component && component->adev != adev) {
+ dev_dbg(adev->parent, "master is not for this component (%s)\n",
dev_name(component->dev));
return 0;
}
- if (!devres_open_group(master->parent, master, GFP_KERNEL))
+ if (!devres_open_group(adev->parent, adev, GFP_KERNEL))
return -ENOMEM;
/* Found all components */
- ret = master->ops->bind(master->parent);
+ ret = adev->ops->bind(adev->parent);
if (ret < 0) {
- devres_release_group(master->parent, NULL);
+ devres_release_group(adev->parent, NULL);
if (ret != -EPROBE_DEFER)
- dev_info(master->parent, "master bind failed: %d\n", ret);
+ dev_info(adev->parent, "adev bind failed: %d\n", ret);
return ret;
}
- devres_close_group(master->parent, NULL);
- master->bound = true;
+ devres_close_group(adev->parent, NULL);
+ adev->bound = true;
return 1;
}
static int try_to_bring_up_masters(struct component *component)
{
- struct master *m;
+ struct aggregate_device *adev;
int ret = 0;
- list_for_each_entry(m, &masters, node) {
- if (!m->bound) {
- ret = try_to_bring_up_master(m, component);
+ list_for_each_entry(adev, &aggregate_devices, node) {
+ if (!adev->bound) {
+ ret = try_to_bring_up_aggregate_device(adev, component);
if (ret != 0)
break;
}
@@ -278,15 +275,72 @@ static int try_to_bring_up_masters(struct component *component)
return ret;
}
-static void take_down_master(struct master *master)
+static void take_down_aggregate_device(struct aggregate_device *adev)
{
- if (master->bound) {
- master->ops->unbind(master->parent);
- devres_release_group(master->parent, master);
- master->bound = false;
+ if (adev->bound) {
+ adev->ops->unbind(adev->parent);
+ devres_release_group(adev->parent, adev);
+ adev->bound = false;
}
}
+/**
+ * component_compare_of - A common component compare function for of_node
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * A common compare function when compare_data is a device of_node. e.g.
+ * component_match_add_release(masterdev, &match, component_release_of,
+ * component_compare_of, component_dev_of_node)
+ */
+int component_compare_of(struct device *dev, void *data)
+{
+ return device_match_of_node(dev, data);
+}
+EXPORT_SYMBOL_GPL(component_compare_of);
+
+/**
+ * component_release_of - A common component release function for of_node
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * For an example, please see component_compare_of().
+ */
+void component_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+EXPORT_SYMBOL_GPL(component_release_of);
+
+/**
+ * component_compare_dev - A common component compare function for dev
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * A common compare function when compare_data is a struct device. e.g.
+ * component_match_add(masterdev, &match, component_compare_dev, component_dev)
+ */
+int component_compare_dev(struct device *dev, void *data)
+{
+ return dev == data;
+}
+EXPORT_SYMBOL_GPL(component_compare_dev);
+
+/**
+ * component_compare_dev_name - A common component compare function for device name
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * A common compare function when compare_data is a device name string. e.g.
+ * component_match_add(masterdev, &match, component_compare_dev_name,
+ * "component_dev_name")
+ */
+int component_compare_dev_name(struct device *dev, void *data)
+{
+ return device_match_name(dev, data);
+}
+EXPORT_SYMBOL_GPL(component_compare_dev_name);
+
static void devm_component_match_release(struct device *parent, void *res)
{
struct component_match *match = res;
@@ -324,7 +378,7 @@ static int component_match_realloc(struct component_match *match, size_t num)
return 0;
}
-static void __component_match_add(struct device *master,
+static void __component_match_add(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *),
@@ -344,7 +398,7 @@ static void __component_match_add(struct device *master,
return;
}
- devres_add(master, match);
+ devres_add(parent, match);
*matchptr = match;
}
@@ -370,13 +424,13 @@ static void __component_match_add(struct device *master,
/**
* component_match_add_release - add a component match entry with release callback
- * @master: device with the aggregate driver
+ * @parent: parent device of the aggregate driver
* @matchptr: pointer to the list of component matches
* @release: release function for @compare_data
* @compare: compare function to match against all components
* @compare_data: opaque pointer passed to the @compare function
*
- * Adds a new component match to the list stored in @matchptr, which the @master
+ * Adds a new component match to the list stored in @matchptr, which the
* aggregate driver needs to function. The list of component matches pointed to
* by @matchptr must be initialized to NULL before adding the first match. This
* only matches against components added with component_add().
@@ -388,24 +442,24 @@ static void __component_match_add(struct device *master,
*
* See also component_match_add() and component_match_add_typed().
*/
-void component_match_add_release(struct device *master,
+void component_match_add_release(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data)
{
- __component_match_add(master, matchptr, release, compare, NULL,
+ __component_match_add(parent, matchptr, release, compare, NULL,
compare_data);
}
EXPORT_SYMBOL(component_match_add_release);
/**
* component_match_add_typed - add a component match entry for a typed component
- * @master: device with the aggregate driver
+ * @parent: parent device of the aggregate driver
* @matchptr: pointer to the list of component matches
* @compare_typed: compare function to match against all typed components
* @compare_data: opaque pointer passed to the @compare function
*
- * Adds a new component match to the list stored in @matchptr, which the @master
+ * Adds a new component match to the list stored in @matchptr, which the
* aggregate driver needs to function. The list of component matches pointed to
* by @matchptr must be initialized to NULL before adding the first match. This
* only matches against components added with component_add_typed().
@@ -415,32 +469,32 @@ EXPORT_SYMBOL(component_match_add_release);
*
* See also component_match_add_release() and component_match_add_typed().
*/
-void component_match_add_typed(struct device *master,
+void component_match_add_typed(struct device *parent,
struct component_match **matchptr,
int (*compare_typed)(struct device *, int, void *), void *compare_data)
{
- __component_match_add(master, matchptr, NULL, NULL, compare_typed,
+ __component_match_add(parent, matchptr, NULL, NULL, compare_typed,
compare_data);
}
EXPORT_SYMBOL(component_match_add_typed);
-static void free_master(struct master *master)
+static void free_aggregate_device(struct aggregate_device *adev)
{
- struct component_match *match = master->match;
+ struct component_match *match = adev->match;
int i;
- component_master_debugfs_del(master);
- list_del(&master->node);
+ component_debugfs_del(adev);
+ list_del(&adev->node);
if (match) {
for (i = 0; i < match->num; i++) {
struct component *c = match->compare[i].component;
if (c)
- c->master = NULL;
+ c->adev = NULL;
}
}
- kfree(master);
+ kfree(adev);
}
/**
@@ -459,7 +513,7 @@ int component_master_add_with_match(struct device *parent,
const struct component_master_ops *ops,
struct component_match *match)
{
- struct master *master;
+ struct aggregate_device *adev;
int ret;
/* Reallocate the match array for its true size */
@@ -467,23 +521,23 @@ int component_master_add_with_match(struct device *parent,
if (ret)
return ret;
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
return -ENOMEM;
- master->parent = parent;
- master->ops = ops;
- master->match = match;
+ adev->parent = parent;
+ adev->ops = ops;
+ adev->match = match;
- component_master_debugfs_add(master);
- /* Add to the list of available masters. */
+ component_debugfs_add(adev);
+ /* Add to the list of available aggregate devices. */
mutex_lock(&component_mutex);
- list_add(&master->node, &masters);
+ list_add(&adev->node, &aggregate_devices);
- ret = try_to_bring_up_master(master, NULL);
+ ret = try_to_bring_up_aggregate_device(adev, NULL);
if (ret < 0)
- free_master(master);
+ free_aggregate_device(adev);
mutex_unlock(&component_mutex);
@@ -503,25 +557,25 @@ EXPORT_SYMBOL_GPL(component_master_add_with_match);
void component_master_del(struct device *parent,
const struct component_master_ops *ops)
{
- struct master *master;
+ struct aggregate_device *adev;
mutex_lock(&component_mutex);
- master = __master_find(parent, ops);
- if (master) {
- take_down_master(master);
- free_master(master);
+ adev = __aggregate_find(parent, ops);
+ if (adev) {
+ take_down_aggregate_device(adev);
+ free_aggregate_device(adev);
}
mutex_unlock(&component_mutex);
}
EXPORT_SYMBOL_GPL(component_master_del);
static void component_unbind(struct component *component,
- struct master *master, void *data)
+ struct aggregate_device *adev, void *data)
{
WARN_ON(!component->bound);
if (component->ops && component->ops->unbind)
- component->ops->unbind(component->dev, master->parent, data);
+ component->ops->unbind(component->dev, adev->parent, data);
component->bound = false;
/* Release all resources claimed in the binding of this component */
@@ -539,26 +593,26 @@ static void component_unbind(struct component *component,
*/
void component_unbind_all(struct device *parent, void *data)
{
- struct master *master;
+ struct aggregate_device *adev;
struct component *c;
size_t i;
WARN_ON(!mutex_is_locked(&component_mutex));
- master = __master_find(parent, NULL);
- if (!master)
+ adev = __aggregate_find(parent, NULL);
+ if (!adev)
return;
/* Unbind components in reverse order */
- for (i = master->match->num; i--; )
- if (!master->match->compare[i].duplicate) {
- c = master->match->compare[i].component;
- component_unbind(c, master, data);
+ for (i = adev->match->num; i--; )
+ if (!adev->match->compare[i].duplicate) {
+ c = adev->match->compare[i].component;
+ component_unbind(c, adev, data);
}
}
EXPORT_SYMBOL_GPL(component_unbind_all);
-static int component_bind(struct component *component, struct master *master,
+static int component_bind(struct component *component, struct aggregate_device *adev,
void *data)
{
int ret;
@@ -568,7 +622,7 @@ static int component_bind(struct component *component, struct master *master,
* This allows us to roll-back a failed component without
* affecting anything else.
*/
- if (!devres_open_group(master->parent, NULL, GFP_KERNEL))
+ if (!devres_open_group(adev->parent, NULL, GFP_KERNEL))
return -ENOMEM;
/*
@@ -577,14 +631,14 @@ static int component_bind(struct component *component, struct master *master,
* at the appropriate moment.
*/
if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
- devres_release_group(master->parent, NULL);
+ devres_release_group(adev->parent, NULL);
return -ENOMEM;
}
- dev_dbg(master->parent, "binding %s (ops %ps)\n",
+ dev_dbg(adev->parent, "binding %s (ops %ps)\n",
dev_name(component->dev), component->ops);
- ret = component->ops->bind(component->dev, master->parent, data);
+ ret = component->ops->bind(component->dev, adev->parent, data);
if (!ret) {
component->bound = true;
@@ -595,16 +649,16 @@ static int component_bind(struct component *component, struct master *master,
* can clean those resources up independently.
*/
devres_close_group(component->dev, NULL);
- devres_remove_group(master->parent, NULL);
+ devres_remove_group(adev->parent, NULL);
- dev_info(master->parent, "bound %s (ops %ps)\n",
+ dev_info(adev->parent, "bound %s (ops %ps)\n",
dev_name(component->dev), component->ops);
} else {
devres_release_group(component->dev, NULL);
- devres_release_group(master->parent, NULL);
+ devres_release_group(adev->parent, NULL);
if (ret != -EPROBE_DEFER)
- dev_err(master->parent, "failed to bind %s (ops %ps): %d\n",
+ dev_err(adev->parent, "failed to bind %s (ops %ps): %d\n",
dev_name(component->dev), component->ops, ret);
}
@@ -622,31 +676,31 @@ static int component_bind(struct component *component, struct master *master,
*/
int component_bind_all(struct device *parent, void *data)
{
- struct master *master;
+ struct aggregate_device *adev;
struct component *c;
size_t i;
int ret = 0;
WARN_ON(!mutex_is_locked(&component_mutex));
- master = __master_find(parent, NULL);
- if (!master)
+ adev = __aggregate_find(parent, NULL);
+ if (!adev)
return -EINVAL;
/* Bind components in match order */
- for (i = 0; i < master->match->num; i++)
- if (!master->match->compare[i].duplicate) {
- c = master->match->compare[i].component;
- ret = component_bind(c, master, data);
+ for (i = 0; i < adev->match->num; i++)
+ if (!adev->match->compare[i].duplicate) {
+ c = adev->match->compare[i].component;
+ ret = component_bind(c, adev, data);
if (ret)
break;
}
if (ret != 0) {
for (; i > 0; i--)
- if (!master->match->compare[i - 1].duplicate) {
- c = master->match->compare[i - 1].component;
- component_unbind(c, master, data);
+ if (!adev->match->compare[i - 1].duplicate) {
+ c = adev->match->compare[i - 1].component;
+ component_unbind(c, adev, data);
}
}
@@ -675,8 +729,8 @@ static int __component_add(struct device *dev, const struct component_ops *ops,
ret = try_to_bring_up_masters(component);
if (ret < 0) {
- if (component->master)
- remove_component(component->master, component);
+ if (component->adev)
+ remove_component(component->adev, component);
list_del(&component->node);
kfree(component);
@@ -757,9 +811,9 @@ void component_del(struct device *dev, const struct component_ops *ops)
break;
}
- if (component && component->master) {
- take_down_master(component->master);
- remove_component(component->master, component);
+ if (component && component->adev) {
+ take_down_aggregate_device(component->adev);
+ remove_component(component->adev, component);
}
mutex_unlock(&component_mutex);
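
The component_compare_of()/component_release_of() helpers added above let aggregate drivers drop their hand-rolled compare callbacks. A minimal sketch of an aggregate driver building its match list with them (the foo names are hypothetical; the helper and match APIs are as in this patch):

    #include <linux/component.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int foo_bind(struct device *dev)
    {
            /* Called once all matched components have been added. */
            return component_bind_all(dev, NULL);
    }

    static void foo_unbind(struct device *dev)
    {
            component_unbind_all(dev, NULL);
    }

    static const struct component_master_ops foo_ops = {
            .bind = foo_bind,
            .unbind = foo_unbind,
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct component_match *match = NULL;
            struct device_node *child;

            /* Hold a reference per entry; component_release_of() drops it. */
            for_each_available_child_of_node(pdev->dev.of_node, child)
                    component_match_add_release(&pdev->dev, &match,
                                                component_release_of,
                                                component_compare_of,
                                                of_node_get(child));

            return component_master_add_with_match(&pdev->dev, &foo_ops, match);
    }
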
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 63e769057487..d02501933467 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -21,7 +21,7 @@
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
@@ -32,6 +32,7 @@
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
+#include "physical_location.h"
#include "power/power.h"
#ifdef CONFIG_SYSFS_DEPRECATED
@@ -53,6 +54,7 @@ static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static bool fw_devlink_drv_reg_done;
+static bool fw_devlink_best_effort;
/**
* fwnode_link_add - Create a link between two fwnode_handles.
@@ -485,7 +487,18 @@ static void device_link_release_fn(struct work_struct *work)
/* Ensure that all references to the link object have been dropped. */
device_link_synchronize_removal();
- pm_runtime_release_supplier(link, true);
+ pm_runtime_release_supplier(link);
+ /*
+ * If supplier_preactivated is set, the link has been dropped between
+ * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
+ * in __driver_probe_device(). In that case, drop the supplier's
+ * PM-runtime usage counter to remove the reference taken by
+ * pm_runtime_get_suppliers().
+ */
+ if (link->supplier_preactivated)
+ pm_runtime_put_noidle(link->supplier);
+
+ pm_request_idle(link->supplier);
put_device(link->consumer);
put_device(link->supplier);
@@ -964,6 +977,12 @@ static void device_links_missing_supplier(struct device *dev)
}
}
+static bool dev_is_best_effort(struct device *dev)
+{
+ return (fw_devlink_best_effort && dev->can_match) ||
+ (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
+}
+
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
@@ -983,7 +1002,7 @@ static void device_links_missing_supplier(struct device *dev)
int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
- int ret = 0;
+ int ret = 0, fwnode_ret = 0;
struct fwnode_handle *sup_fw;
/*
@@ -996,12 +1015,17 @@ int device_links_check_suppliers(struct device *dev)
sup_fw = list_first_entry(&dev->fwnode->suppliers,
struct fwnode_link,
c_hook)->supplier;
- dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
- sup_fw);
- mutex_unlock(&fwnode_link_lock);
- return -EPROBE_DEFER;
+ if (!dev_is_best_effort(dev)) {
+ fwnode_ret = -EPROBE_DEFER;
+ dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for supplier %pfwP\n", sup_fw);
+ } else {
+ fwnode_ret = -EAGAIN;
+ }
}
mutex_unlock(&fwnode_link_lock);
+ if (fwnode_ret == -EPROBE_DEFER)
+ return fwnode_ret;
device_links_write_lock();
@@ -1011,6 +1035,14 @@ int device_links_check_suppliers(struct device *dev)
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+
+ if (dev_is_best_effort(dev) &&
+ link->flags & DL_FLAG_INFERRED &&
+ !link->supplier->can_match) {
+ ret = -EAGAIN;
+ continue;
+ }
+
device_links_missing_supplier(dev);
dev_err_probe(dev, -EPROBE_DEFER,
"supplier %s not ready\n",
@@ -1023,7 +1055,8 @@ int device_links_check_suppliers(struct device *dev)
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
- return ret;
+
+ return ret ? ret : fwnode_ret;
}
/**
@@ -1288,6 +1321,18 @@ void device_links_driver_bound(struct device *dev)
 * safe to drop the managed link completely.
*/
device_link_drop_managed(link);
+ } else if (dev_is_best_effort(dev) &&
+ link->flags & DL_FLAG_INFERRED &&
+ link->status != DL_STATE_CONSUMER_PROBE &&
+ !link->supplier->can_match) {
+ /*
+ * When dev_is_best_effort() is true, we ignore device
+ * links to suppliers that don't have a driver. If the
+ * consumer device still managed to probe, there's no
+ * point in maintaining a device link in a weird state
+ * (consumer probed before supplier). So delete it.
+ */
+ device_link_drop_managed(link);
} else {
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
@@ -1654,6 +1699,62 @@ void fw_devlink_drivers_done(void)
device_links_write_unlock();
}
+/**
+ * wait_for_init_devices_probe - Try to probe any device needed for init
+ *
+ * Some devices might need to be probed and bound successfully before the kernel
+ * boot sequence can finish and move on to init/userspace. For example, a
+ * network interface might need to be bound to be able to mount an NFS rootfs.
+ *
+ * With fw_devlink=on by default, some of these devices might be blocked from
+ * probing because they are waiting on an optional supplier that doesn't have a
+ * driver. While fw_devlink will eventually identify such devices and unblock
+ * their probing automatically, that might happen too late. For example, IPv4
+ * autoconfig might time out before fw_devlink unblocks probing of the network
+ * interface.
+ *
+ * This function is available to temporarily try to probe all devices that have
+ * a driver even if some of their suppliers haven't been added or don't have
+ * drivers.
+ *
+ * The drivers can then decide which of the suppliers are optional vs mandatory
+ * and probe the device if possible. By the time this function returns, all such
+ * "best effort" probes are guaranteed to be completed. If a device successfully
+ * probes in this mode, we delete all fw_devlink-discovered dependencies of that
+ * device where the supplier hasn't yet probed successfully because they have to
+ * be optional dependencies.
+ *
+ * Any devices that didn't successfully probe go back to being treated as if
+ * this function was never called.
+ *
+ * This also means that some devices that aren't needed for init and could have
+ * waited for their optional supplier to probe (when the supplier's module is
+ * loaded later on) would end up probing prematurely with limited functionality.
+ * So call this function only when boot would fail without it.
+ */
+void __init wait_for_init_devices_probe(void)
+{
+ if (!fw_devlink_flags || fw_devlink_is_permissive())
+ return;
+
+ /*
+ * Wait for all ongoing probes to finish so that the "best effort" is
+ * only applied to devices that can't probe otherwise.
+ */
+ wait_for_device_probe();
+
+ pr_info("Trying to probe devices needed for running init ...\n");
+ fw_devlink_best_effort = true;
+ driver_deferred_probe_trigger();
+
+ /*
+ * Wait for all "best effort" probes to finish before going back to
+ * normal enforcement.
+ */
+ wait_for_device_probe();
+ fw_devlink_best_effort = false;
+}
+
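For context, a hedged sketch of the kind of late-boot caller this helper is meant for; the initcall and its name below are illustrative, not part of this patch:

#include <linux/device.h>
#include <linux/init.h>

static int __init example_rootfs_prepare(void)
{
	/*
	 * Ask the driver core to probe everything it can, treating
	 * not-yet-probed suppliers as optional, before bringing up a
	 * boot-critical device such as the NFS-root network interface.
	 */
	wait_for_init_devices_probe();
	return 0;
}
late_initcall(example_rootfs_prepare);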
static void fw_devlink_unblock_consumers(struct device *dev)
{
struct device_link *link;
@@ -2260,9 +2361,9 @@ static struct kobj_type device_ktype = {
};
-static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int dev_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &device_ktype) {
struct device *dev = kobj_to_dev(kobj);
@@ -2274,7 +2375,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
return 0;
}
-static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
+static const char *dev_uevent_name(struct kobject *kobj)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2285,8 +2386,7 @@ static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
return NULL;
}
-static int dev_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
struct device *dev = kobj_to_dev(kobj);
int retval = 0;
@@ -2381,7 +2481,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
/* respect filter */
if (kset->uevent_ops && kset->uevent_ops->filter)
- if (!kset->uevent_ops->filter(kset, &dev->kobj))
+ if (!kset->uevent_ops->filter(&dev->kobj))
goto out;
env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
@@ -2389,7 +2489,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
return -ENOMEM;
/* let the kset specific function add its keys */
- retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
+ retval = kset->uevent_ops->uevent(&dev->kobj, env);
if (retval)
goto out;
@@ -2409,7 +2509,7 @@ static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
rc = kobject_synth_uevent(&dev->kobj, buf, count);
if (rc) {
- dev_err(dev, "uevent: failed to send synthetic uevent\n");
+ dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
return rc;
}
@@ -2650,8 +2750,17 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_waiting_for_supplier;
}
+ if (dev_add_physical_location(dev)) {
+ error = device_add_group(dev,
+ &dev_attr_physical_location_group);
+ if (error)
+ goto err_remove_dev_removable;
+ }
+
return 0;
+ err_remove_dev_removable:
+ device_remove_file(dev, &dev_attr_removable);
err_remove_dev_waiting_for_supplier:
device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
@@ -2673,6 +2782,11 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ if (dev->physical_location) {
+ device_remove_group(dev, &dev_attr_physical_location_group);
+ kfree(dev->physical_location);
+ }
+
device_remove_file(dev, &dev_attr_removable);
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_remove_file(dev, &dev_attr_online);
@@ -2865,18 +2979,11 @@ void device_initialize(struct device *dev)
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
-#ifdef CONFIG_PROVE_LOCKING
- mutex_init(&dev->lockdep_mutex);
-#endif
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, NUMA_NO_NODE);
-#ifdef CONFIG_GENERIC_MSI_IRQ
- raw_spin_lock_init(&dev->msi_lock);
- INIT_LIST_HEAD(&dev->msi_list);
-#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
@@ -3028,6 +3135,23 @@ static inline struct kobject *get_glue_dir(struct device *dev)
return dev->kobj.parent;
}
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only
+ * subdirectories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
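kobject_has_children() stays file-local; a minimal sketch of the check it enables, assuming the caller holds whatever lock serializes child addition and removal (the helper name is hypothetical):

static void example_cleanup_glue_dir(struct kobject *glue_dir)
{
	/* Tear the glue directory down only once its last child is gone. */
	if (!kobject_has_children(glue_dir))
		kobject_del(glue_dir);
}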
/*
* make sure cleaning up dir as the last step, we need to make
* sure .release handler of kobject is run with holding the
@@ -3808,6 +3932,26 @@ struct device *device_find_child_by_name(struct device *parent,
}
EXPORT_SYMBOL_GPL(device_find_child_by_name);
+static int match_any(struct device *dev, void *unused)
+{
+ return 1;
+}
+
+/**
+ * device_find_any_child - device iterator for locating a child device, if any.
+ * @parent: parent struct device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a child device, if any.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+struct device *device_find_any_child(struct device *parent)
+{
+ return device_find_child(parent, NULL, match_any);
+}
+EXPORT_SYMBOL_GPL(device_find_any_child);
+
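A hedged usage sketch; note the put_device() that balances the reference device_find_any_child() takes:

static bool example_has_child(struct device *parent)
{
	struct device *child = device_find_any_child(parent);

	if (!child)
		return false;
	put_device(child);	/* drop the iterator's reference */
	return true;
}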
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
@@ -4026,7 +4170,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,
struct device *dev = NULL;
int retval = -ENODEV;
- if (class == NULL || IS_ERR(class))
+ if (IS_ERR_OR_NULL(class))
goto error;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 5fc258073bc7..4c98849577d4 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -275,7 +275,7 @@ static ssize_t print_cpus_isolated(struct device *dev,
return -ENOMEM;
cpumask_andnot(isolated, cpu_possible_mask,
- housekeeping_cpumask(HK_FLAG_DOMAIN));
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
@@ -564,6 +564,18 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -573,6 +585,8 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -584,6 +598,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
+ &dev_attr_mmio_stale_data.attr,
+ &dev_attr_retbleed.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 68ea1f949daa..3dda62503102 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -60,6 +60,7 @@ static bool initcalls_done;
/* Save the async probe drivers' name from kernel cmdline */
#define ASYNC_DRV_NAMES_MAX_LEN 256
static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
+static bool async_probe_default;
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
@@ -152,7 +153,7 @@ void driver_deferred_probe_del(struct device *dev)
mutex_unlock(&deferred_probe_mutex);
}
-static bool driver_deferred_probe_enable = false;
+static bool driver_deferred_probe_enable;
/**
* driver_deferred_probe_trigger() - Kick off re-probing deferred devices
*
@@ -171,7 +172,7 @@ static bool driver_deferred_probe_enable = false;
* changes in the midst of a probe, then deferred processing should be triggered
* again.
*/
-static void driver_deferred_probe_trigger(void)
+void driver_deferred_probe_trigger(void)
{
if (!driver_deferred_probe_enable)
return;
@@ -255,9 +256,13 @@ static int deferred_devs_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
+#ifdef CONFIG_MODULES
+int driver_deferred_probe_timeout = 10;
+#else
int driver_deferred_probe_timeout;
+#endif
+
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
static int __init deferred_probe_timeout_setup(char *str)
{
@@ -274,10 +279,10 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
* @dev: device to check
*
* Return:
- * -ENODEV if initcalls have completed and modules are disabled.
- * -ETIMEDOUT if the deferred probe timeout was set and has expired
- * and modules are enabled.
- * -EPROBE_DEFER in other cases.
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
*
 * Drivers or subsystems can opt in to calling this function instead of directly
* returning -EPROBE_DEFER.
@@ -296,6 +301,7 @@ int driver_deferred_probe_check_state(struct device *dev)
return -EPROBE_DEFER;
}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
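With the symbol exported, a module's probe path can opt in instead of returning a bare -EPROBE_DEFER; a minimal sketch (the driver and its clock are hypothetical):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			/* Converts to -ENODEV/-ETIMEDOUT per the rules above. */
			return driver_deferred_probe_check_state(&pdev->dev);
		return PTR_ERR(clk);
	}
	return 0;
}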
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
@@ -311,10 +317,23 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
- wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+void deferred_probe_extend_timeout(void)
+{
+ /*
+ * If the work hasn't been queued yet or if the work expired, don't
+ * start a new one.
+ */
+ if (cancel_delayed_work(&deferred_probe_timeout_work)) {
+ schedule_delayed_work(&deferred_probe_timeout_work,
+ driver_deferred_probe_timeout * HZ);
+ pr_debug("Extended deferred probe timeout by %d secs\n",
+ driver_deferred_probe_timeout);
+ }
+}
+
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
@@ -506,6 +525,30 @@ static ssize_t state_synced_show(struct device *dev,
}
static DEVICE_ATTR_RO(state_synced);
+static void device_unbind_cleanup(struct device *dev)
+{
+ devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+ if (dev->pm_domain && dev->pm_domain->dismiss)
+ dev->pm_domain->dismiss(dev);
+ pm_runtime_reinit(dev);
+ dev_pm_set_driver_flags(dev, 0);
+}
+
+static void device_remove(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_state_synced);
+ device_remove_groups(dev, dev->driver->dev_groups);
+
+ if (dev->bus && dev->bus->remove)
+ dev->bus->remove(dev);
+ else if (dev->driver->remove)
+ dev->driver->remove(dev);
+}
static int call_driver_probe(struct device *dev, struct device_driver *drv)
{
@@ -542,7 +585,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
{
bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
!drv->suppress_bind_attrs;
- int ret;
+ int ret, link_ret;
if (defer_all_probes) {
/*
@@ -554,9 +597,9 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return -EPROBE_DEFER;
}
- ret = device_links_check_suppliers(dev);
- if (ret)
- return ret;
+ link_ret = device_links_check_suppliers(dev);
+ if (link_ret == -EPROBE_DEFER)
+ return link_ret;
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -577,14 +620,14 @@ re_probe:
if (dev->bus->dma_configure) {
ret = dev->bus->dma_configure(dev);
if (ret)
- goto probe_failed;
+ goto pinctrl_bind_failed;
}
ret = driver_sysfs_add(dev);
if (ret) {
pr_err("%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
- goto probe_failed;
+ goto sysfs_failed;
}
if (dev->pm_domain && dev->pm_domain->activate) {
@@ -596,6 +639,15 @@ re_probe:
ret = call_driver_probe(dev, drv);
if (ret) {
/*
+ * If fw_devlink_best_effort is active (denoted by -EAGAIN), the
+ * device might actually probe properly once some of its missing
+ * suppliers have probed. So, treat this as if the driver
+ * returned -EPROBE_DEFER.
+ */
+ if (link_ret == -EAGAIN)
+ ret = -EPROBE_DEFER;
+
+ /*
* Return probe errors as positive values so that the callers
* can distinguish them from other errors.
*/
@@ -620,21 +672,9 @@ re_probe:
if (test_remove) {
test_remove = false;
- device_remove_file(dev, &dev_attr_state_synced);
- device_remove_groups(dev, drv->dev_groups);
-
- if (dev->bus->remove)
- dev->bus->remove(dev);
- else if (drv->remove)
- drv->remove(dev);
-
- devres_release_all(dev);
+ device_remove(dev);
driver_sysfs_remove(dev);
- dev->driver = NULL;
- dev_set_drvdata(dev, NULL);
- if (dev->pm_domain && dev->pm_domain->dismiss)
- dev->pm_domain->dismiss(dev);
- pm_runtime_reinit(dev);
+ device_unbind_cleanup(dev);
goto re_probe;
}
@@ -650,29 +690,19 @@ re_probe:
goto done;
dev_sysfs_state_synced_failed:
- device_remove_groups(dev, drv->dev_groups);
dev_groups_failed:
- if (dev->bus->remove)
- dev->bus->remove(dev);
- else if (drv->remove)
- drv->remove(dev);
+ device_remove(dev);
probe_failed:
+ driver_sysfs_remove(dev);
+sysfs_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
- devres_release_all(dev);
- arch_teardown_dma_ops(dev);
- kfree(dev->dma_range_map);
- dev->dma_range_map = NULL;
- driver_sysfs_remove(dev);
- dev->driver = NULL;
- dev_set_drvdata(dev, NULL);
- if (dev->pm_domain && dev->pm_domain->dismiss)
- dev->pm_domain->dismiss(dev);
- pm_runtime_reinit(dev);
- dev_pm_set_driver_flags(dev, 0);
+ device_unbind_cleanup(dev);
done:
return ret;
}
@@ -715,9 +745,6 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
- /* wait for probe timeout */
- wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
-
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
@@ -796,7 +823,11 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev)
static inline bool cmdline_requested_async_probing(const char *drv_name)
{
- return parse_option_str(async_probe_drv_names, drv_name);
+ bool async_drv;
+
+ async_drv = parse_option_str(async_probe_drv_names, drv_name);
+
+ return (async_probe_default != async_drv);
}
/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
@@ -805,8 +836,10 @@ static int __init save_async_options(char *buf)
if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
- strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
- return 0;
+ strscpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+ async_probe_default = parse_option_str(async_probe_drv_names, "*");
+
+ return 1;
}
__setup("driver_async_probe=", save_async_options);
@@ -834,7 +867,7 @@ struct device_attach_data {
struct device *dev;
/*
- * Indicates whether we are are considering asynchronous probing or
+ * Indicates whether we are considering asynchronous probing or
* not. Only initial binding after device or driver registration
* (including deferral processing) may be done asynchronously, the
* rest is always synchronous, as we expect it is being done by
@@ -878,6 +911,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -940,6 +978,7 @@ out_unlock:
static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
+ bool async = false;
device_lock(dev);
if (dev->p->dead) {
@@ -978,7 +1017,7 @@ static int __device_attach(struct device *dev, bool allow_async)
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
- async_schedule_dev(__device_attach_async_helper, dev);
+ async = true;
} else {
pm_request_idle(dev);
}
@@ -988,6 +1027,8 @@ static int __device_attach(struct device *dev, bool allow_async)
}
out_unlock:
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__device_attach_async_helper, dev);
return ret;
}
@@ -1038,7 +1079,7 @@ static void __device_driver_lock(struct device *dev, struct device *parent)
* @parent: Parent device. Needed if the bus requires parent lock
*
* This function will release the required locks for manipulating dev->drv.
- * Normally this will just be the the @dev lock, but when called for a
+ * Normally this will just be the @dev lock, but when called for a
* USB interface, @parent lock will be released as well.
*/
static void __device_driver_unlock(struct device *dev, struct device *parent)
@@ -1081,6 +1122,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
__device_driver_lock(dev, dev->parent);
drv = dev->p->async_driver;
+ dev->p->async_driver = NULL;
ret = driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
@@ -1092,6 +1134,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
+ bool async = false;
int ret;
/*
@@ -1112,6 +1155,11 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -1127,12 +1175,14 @@ static int __driver_attach(struct device *dev, void *data)
*/
dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
device_lock(dev);
- if (!dev->driver) {
+ if (!dev->driver && !dev->p->async_driver) {
get_device(dev);
dev->p->async_driver = drv;
- async_schedule_dev(__driver_attach_async_helper, dev);
+ async = true;
}
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__driver_attach_async_helper, dev);
return 0;
}
@@ -1196,24 +1246,13 @@ static void __device_release_driver(struct device *dev, struct device *parent)
pm_runtime_put_sync(dev);
- device_remove_file(dev, &dev_attr_state_synced);
- device_remove_groups(dev, drv->dev_groups);
+ device_remove(dev);
- if (dev->bus && dev->bus->remove)
- dev->bus->remove(dev);
- else if (drv->remove)
- drv->remove(dev);
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
device_links_driver_cleanup(dev);
-
- devres_release_all(dev);
- arch_teardown_dma_ops(dev);
- dev->driver = NULL;
- dev_set_drvdata(dev, NULL);
- if (dev->pm_domain && dev->pm_domain->dismiss)
- dev->pm_domain->dismiss(dev);
- pm_runtime_reinit(dev);
- dev_pm_set_driver_flags(dev, 0);
+ device_unbind_cleanup(dev);
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index f4d794d6bb85..1c06781f7114 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -25,6 +25,47 @@ struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
+ /*
+ * The mutex serializes the handling of del_wk between kernel and user
+ * space: device_add() sends a uevent for the new devcd device, user
+ * space reads it and writes to the devcd data file, and the resulting
+ * devcd_data_write() may try to modify a work item that devcoredump
+ * has not yet initialized or queued.
+ *
+ * cpu0(X) cpu1(Y)
+ *
+ * dev_coredump() uevent sent to user space
+ * device_add() ======================> user space process Y reads the
+ * uevents and writes to the devcd fd,
+ * which results in calls to
+ *
+ * devcd_data_write()
+ * mod_delayed_work()
+ * try_to_grab_pending()
+ * del_timer()
+ * debug_assert_init()
+ * INIT_DELAYED_WORK()
+ * schedule_delayed_work()
+ *
+ *
+ * Also, the mutex alone would not be enough to prevent del_wk from
+ * being scheduled again after it has been flushed by a call to
+ * devcd_free(), as shown below.
+ *
+ * disabled_store()
+ * devcd_free()
+ * mutex_lock() devcd_data_write()
+ * flush_delayed_work()
+ * mutex_unlock()
+ * mutex_lock()
+ * mod_delayed_work()
+ * mutex_unlock()
+ * So the delete_work flag is required as well.
+ */
+ struct mutex mutex;
+ bool delete_work;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
@@ -84,7 +125,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work) {
+ devcd->delete_work = true;
+ mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ }
+ mutex_unlock(&devcd->mutex);
return count;
}
@@ -112,7 +158,12 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work)
+ devcd->delete_work = true;
+
flush_delayed_work(&devcd->del_wk);
+ mutex_unlock(&devcd->mutex);
return 0;
}
@@ -122,6 +173,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
return sysfs_emit(buf, "%d\n", devcd_disabled);
}
+/*
+ *
+ * disabled_store() worker()
+ * class_for_each_device(&devcd_class,
+ * NULL, NULL, devcd_free)
+ * ...
+ * ...
+ * while ((dev = class_dev_iter_next(&iter))
+ * devcd_del()
+ * device_del()
+ * put_device() <- last reference
+ * error = fn(dev, data) devcd_dev_release()
+ * devcd_free(dev, data) kfree(devcd)
+ * mutex_lock(&devcd->mutex);
+ *
+ *
+ * In the diagram above, disabled_store() appears to race with a
+ * devcd_del() running in parallel and to fault by acquiring
+ * devcd->mutex after the devcd memory has already been kfree()d on the
+ * last put_device(). This does not happen, however: fn(dev, data) runs
+ * while the class iterator still holds its own reference to the device
+ * via its klist_node, so the put_device() shown above is never the
+ * last reference.
+ */
+
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
@@ -278,13 +353,16 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
+ devcd->delete_work = false;
+ mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
dev_set_name(&devcd->devcd_dev, "devcd%d",
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
+ mutex_lock(&devcd->mutex);
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -301,10 +379,11 @@ void dev_coredumpm(struct device *dev, struct module *owner,
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
-
+ mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
put_module:
module_put(owner);
free:
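The producer side of this machinery is unchanged; for orientation, a hedged sketch of a driver handing a dump to devcoredump via the vmalloc-based helper (the example function is hypothetical):

#include <linux/devcoredump.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void example_report_crash(struct device *dev, const void *snap,
				 size_t len)
{
	void *dump = vmalloc(len);

	if (!dump)
		return;
	memcpy(dump, snap, len);
	/* devcoredump takes ownership and vfree()s the buffer once the
	 * timeout fires or user space writes to the data file. */
	dev_coredumpv(dev, dump, len, GFP_KERNEL);
}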
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index eaa9a5cd1db9..4ab2b50ee38f 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -117,7 +117,9 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
if (unlikely(!dr))
return NULL;
- memset(dr, 0, offsetof(struct devres, data));
+ /* No need to clear memory twice */
+ if (!(gfp & __GFP_ZERO))
+ memset(dr, 0, offsetof(struct devres, data));
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
@@ -692,7 +694,7 @@ EXPORT_SYMBOL_GPL(devres_release_group);
/*
* Custom devres actions allow inserting a simple function call
- * into the teadown sequence.
+ * into the teardown sequence.
*/
struct action_devres {
@@ -916,7 +918,7 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
/*
* We can copy the memory contents after releasing the lock as we're
- * no longer modyfing the list links.
+ * no longer modifying the list links.
*/
memcpy(new_dr->data, old_dr->data,
total_old_size - offsetof(struct devres, data));
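The custom teardown hook described in the comment above is the devm_add_action() family; a minimal hedged sketch, with the foo_* names standing in for a real driver's state and teardown call:

#include <linux/device.h>

struct foo_hw;					/* hypothetical driver state */
void foo_hw_power_off(struct foo_hw *hw);	/* hypothetical teardown */

static void example_power_off_action(void *data)
{
	foo_hw_power_off(data);
}

static int example_attach(struct device *dev, struct foo_hw *hw)
{
	/* Runs the action automatically on driver detach; if registration
	 * fails, the action runs immediately and an error is returned. */
	return devm_add_action_or_reset(dev, example_power_off_action, hw);
}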
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 8be352ab4ddb..e4bffeabf344 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -17,7 +17,7 @@
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
@@ -29,6 +29,12 @@
#include <uapi/linux/mount.h>
#include "base.h"
+#ifdef CONFIG_DEVTMPFS_SAFE
+#define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID)
+#else
+#define DEVTMPFS_MFLAGS (MS_SILENT)
+#endif
+
static struct task_struct *thread;
static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
@@ -59,8 +65,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla
const char *dev_name, void *data)
{
struct super_block *s = mnt->mnt_sb;
+ int err;
+
atomic_inc(&s->s_active);
down_write(&s->s_umount);
+ err = reconfigure_single(s, flags, data);
+ if (err < 0) {
+ deactivate_locked_super(s);
+ return ERR_PTR(err);
+ }
return dget(s->s_root);
}
@@ -68,10 +81,8 @@ static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
.init_fs_context = shmem_init_fs_context,
- .parameters = shmem_fs_parameters,
#else
.init_fs_context = ramfs_init_fs_context,
- .parameters = ramfs_fs_parameters,
#endif
.kill_sb = kill_litter_super,
};
@@ -363,7 +374,7 @@ int __init devtmpfs_mount(void)
if (!thread)
return 0;
- err = init_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
@@ -412,7 +423,7 @@ static noinline int __init devtmpfs_setup(void *p)
err = ksys_unshare(CLONE_NEWNS);
if (err)
goto out;
- err = init_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
goto out;
init_chdir("/.."); /* will traverse into overmounted root */
@@ -471,6 +482,7 @@ int __init devtmpfs_init(void)
if (err) {
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
unregister_filesystem(&dev_fs_type);
+ thread = NULL;
return err;
}
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 8c0d33e182fd..676b6275d5b5 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -31,6 +31,81 @@ static struct device *next_device(struct klist_iter *i)
}
/**
+ * driver_set_override() - Helper to set or clear driver override.
+ * @dev: Device to change
+ * @override: Address of string to change (e.g. &device->driver_override);
+ * The current contents are freed and replaced with a newly allocated override.
+ * @s: NUL-terminated string, new driver name to force a match, pass empty
+ * string to clear it ("" or "\n", where the latter is accepted only
+ * via the sysfs interface).
+ * @len: length of @s
+ *
+ * Helper to set or clear driver override in a device, intended for the cases
+ * when the driver_override field is allocated by driver/bus code.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len)
+{
+ const char *new, *old;
+ char *cp;
+
+ if (!override || !s)
+ return -EINVAL;
+
+ /*
+ * The stored value will be used in sysfs show callback (sysfs_emit()),
+ * which has a length limit of PAGE_SIZE and adds a trailing newline.
+ * Thus we can store one character less to avoid truncation during sysfs
+ * show.
+ */
+ if (len >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ /*
+ * Compute the real length of the string in case userspace sends us a
+ * bunch of \0 characters, as Python likes to do.
+ */
+ len = strlen(s);
+
+ if (!len) {
+ /* Empty string passed - clear override */
+ device_lock(dev);
+ old = *override;
+ *override = NULL;
+ device_unlock(dev);
+ kfree(old);
+
+ return 0;
+ }
+
+ cp = strnchr(s, len, '\n');
+ if (cp)
+ len = cp - s;
+
+ new = kstrndup(s, len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ device_lock(dev);
+ old = *override;
+ if (cp != s) {
+ *override = new;
+ } else {
+ /* "\n" passed - clear override */
+ kfree(new);
+ *override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(driver_set_override);
+
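Buses that used to open-code this can now delegate from their driver_override store callback; a platform-bus-style sketch of the intended call site (hedged, mirroring the follow-up conversions):

#include <linux/device.h>
#include <linux/platform_device.h>

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	ret = driver_set_override(dev, &pdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}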
+/**
* driver_for_each_device - Iterator for devices bound to a driver.
* @drv: Driver we're iterating.
* @start: Device to begin with
@@ -177,6 +252,7 @@ int driver_register(struct device_driver *drv)
return ret;
}
kobject_uevent(&drv->p->kobj, KOBJ_ADD);
+ deferred_probe_extend_timeout();
return ret;
}
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 5b24f3959255..5166b323a0f8 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -29,6 +29,9 @@ if FW_LOADER
config FW_LOADER_PAGED_BUF
bool
+config FW_LOADER_SYSFS
+ bool
+
config EXTRA_FIRMWARE
string "Build named firmware blobs into the kernel binary"
help
@@ -60,6 +63,8 @@ config EXTRA_FIRMWARE
image since it combines both GPL and non-GPL work. You should
consult a lawyer of your own before distributing such an image.
+ NOTE: Compressed files are not supported in EXTRA_FIRMWARE.
+
config EXTRA_FIRMWARE_DIR
string "Firmware blobs root directory"
depends on EXTRA_FIRMWARE != ""
@@ -70,6 +75,7 @@ config EXTRA_FIRMWARE_DIR
config FW_LOADER_USER_HELPER
bool "Enable the firmware sysfs fallback mechanism"
+ select FW_LOADER_SYSFS
select FW_LOADER_PAGED_BUF
help
This option enables a sysfs loading facility to enable firmware
@@ -157,17 +163,33 @@ config FW_LOADER_USER_HELPER_FALLBACK
config FW_LOADER_COMPRESS
bool "Enable compressed firmware support"
- select FW_LOADER_PAGED_BUF
- select XZ_DEC
help
This option enables support for loading compressed firmware
files. The caller of the firmware API receives the decompressed file
content. A compressed file is loaded only as a fallback, after
loading the raw file has failed.
- Currently only XZ-compressed files are supported, and they have to
- be compressed with either none or crc32 integrity check type (pass
- "-C crc32" option to xz command).
+ Compressed firmware support does not apply to firmware images
+ that are built into the kernel image (CONFIG_EXTRA_FIRMWARE).
+
+if FW_LOADER_COMPRESS
+config FW_LOADER_COMPRESS_XZ
+ bool "Enable XZ-compressed firmware support"
+ select FW_LOADER_PAGED_BUF
+ select XZ_DEC
+ default y
+ help
+ This option adds support for XZ-compressed firmware files.
+ The files have to be compressed with either no integrity check or a
+ crc32 check (pass the "-C crc32" option to the xz command).
+
+config FW_LOADER_COMPRESS_ZSTD
+ bool "Enable ZSTD-compressed firmware support"
+ select ZSTD_DECOMPRESS
+ help
+ This option adds support for ZSTD-compressed firmware files.
+
+endif # FW_LOADER_COMPRESS
config FW_CACHE
bool "Enable firmware caching during suspend"
@@ -181,5 +203,19 @@ config FW_CACHE
If unsure, say Y.
+config FW_UPLOAD
+ bool "Enable users to initiate firmware updates using sysfs"
+ select FW_LOADER_SYSFS
+ select FW_LOADER_PAGED_BUF
+ help
+ Enabling this option will allow device drivers to expose a persistent
+ sysfs interface that allows firmware updates to be initiated from
+ userspace. For example, FPGA based PCIe cards load firmware and FPGA
+ images from local FLASH when the card boots. The images in FLASH may
+ be updated with new images provided by the user. Enable this option
+ to support cards that rely on user-initiated updates for firmware files.
+
+ If unsure, say N.
+
endif # FW_LOADER
endmenu
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
index e87843408fe6..60d19f9e0ddc 100644
--- a/drivers/base/firmware_loader/Makefile
+++ b/drivers/base/firmware_loader/Makefile
@@ -6,5 +6,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
firmware_class-objs := main.o
firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o
+firmware_class-$(CONFIG_FW_LOADER_SYSFS) += sysfs.o
+firmware_class-$(CONFIG_FW_UPLOAD) += sysfs_upload.o
obj-y += builtin/
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index eb4be452062a..6c067dedc01e 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -3,10 +3,10 @@ obj-y += main.o
# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a
# leading /, it's relative to $(srctree).
-fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR))
+fwdir := $(CONFIG_EXTRA_FIRMWARE_DIR)
fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
-firmware := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
+firmware := $(addsuffix .gen.o, $(CONFIG_EXTRA_FIRMWARE))
obj-y += $(firmware)
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index d7d63c1aa993..bf68e3947814 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -3,12 +3,9 @@
#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/security.h>
-#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
#include "fallback.h"
@@ -18,22 +15,6 @@
* firmware fallback mechanism
*/
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
-
-extern struct firmware_fallback_config fw_fallback_config;
-
-/* These getters are vetted to use int properly */
-static inline int __firmware_loading_timeout(void)
-{
- return fw_fallback_config.loading_timeout;
-}
-
-/* These setters are vetted to use int properly */
-static void __fw_fallback_set_timeout(int timeout)
-{
- fw_fallback_config.loading_timeout = timeout;
-}
-
/*
 * use a small loading timeout for caching devices' firmware because all these
 * firmware images have been loaded successfully at least once, also system is
@@ -58,52 +39,11 @@ static long firmware_loading_timeout(void)
__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}
-static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_DONE);
-}
-
-static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_LOADING);
-}
-
static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
return __fw_state_wait_common(fw_priv, timeout);
}
-struct fw_sysfs {
- bool nowait;
- struct device dev;
- struct fw_priv *fw_priv;
- struct firmware *fw;
-};
-
-static struct fw_sysfs *to_fw_sysfs(struct device *dev)
-{
- return container_of(dev, struct fw_sysfs, dev);
-}
-
-static void __fw_load_abort(struct fw_priv *fw_priv)
-{
- /*
- * There is a small window in which user can write to 'loading'
- * between loading done/aborted and disappearance of 'loading'
- */
- if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
- return;
-
- fw_state_aborted(fw_priv);
-}
-
-static void fw_load_abort(struct fw_sysfs *fw_sysfs)
-{
- struct fw_priv *fw_priv = fw_sysfs->fw_priv;
-
- __fw_load_abort(fw_priv);
-}
-
static LIST_HEAD(pending_fw_head);
void kill_pending_fw_fallback_reqs(bool only_kill_custom)
@@ -120,371 +60,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
mutex_unlock(&fw_lock);
}
-static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
-}
-
-/**
- * timeout_store() - set number of seconds to wait for firmware
- * @class: device class pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for timeout value
- * @count: number of bytes in @buf
- *
- * Sets the number of seconds to wait for the firmware. Once
- * this expires an error will be returned to the driver and no
- * firmware will be provided.
- *
- * Note: zero means 'wait forever'.
- **/
-static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count)
-{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
-
- if (tmp_loading_timeout < 0)
- tmp_loading_timeout = 0;
-
- __fw_fallback_set_timeout(tmp_loading_timeout);
-
- return count;
-}
-static CLASS_ATTR_RW(timeout);
-
-static struct attribute *firmware_class_attrs[] = {
- &class_attr_timeout.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(firmware_class);
-
-static void fw_dev_release(struct device *dev)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-
- kfree(fw_sysfs);
-}
-
-static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
-{
- if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
- return -ENOMEM;
- if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
- return -ENOMEM;
- if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
- return -ENOMEM;
-
- return 0;
-}
-
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int err = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- err = do_firmware_uevent(fw_sysfs, env);
- mutex_unlock(&fw_lock);
- return err;
-}
-
-static struct class firmware_class = {
- .name = "firmware",
- .class_groups = firmware_class_groups,
- .dev_uevent = firmware_uevent,
- .dev_release = fw_dev_release,
-};
-
-int register_sysfs_loader(void)
-{
- return class_register(&firmware_class);
-}
-
-void unregister_sysfs_loader(void)
-{
- class_unregister(&firmware_class);
-}
-
-static ssize_t firmware_loading_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int loading = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- loading = fw_sysfs_loading(fw_sysfs->fw_priv);
- mutex_unlock(&fw_lock);
-
- return sysfs_emit(buf, "%d\n", loading);
-}
-
-/**
- * firmware_loading_store() - set value in the 'loading' control file
- * @dev: device pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for loading control value
- * @count: number of bytes in @buf
- *
- * The relevant values are:
- *
- * 1: Start a load, discarding any previous partial load.
- * 0: Conclude the load and hand the data to the driver code.
- * -1: Conclude the load with an error and discard any written data.
- **/
-static ssize_t firmware_loading_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (fw_state_is_aborted(fw_priv))
- goto out;
-
- switch (loading) {
- case 1:
- /* discarding any previous partial load */
- if (!fw_sysfs_done(fw_priv)) {
- fw_free_paged_buf(fw_priv);
- fw_state_start(fw_priv);
- }
- break;
- case 0:
- if (fw_sysfs_loading(fw_priv)) {
- int rc;
-
- /*
- * Several loading requests may be pending on
- * one same firmware buf, so let all requests
- * see the mapped 'buf->data' once the loading
- * is completed.
- * */
- rc = fw_map_paged_buf(fw_priv);
- if (rc)
- dev_err(dev, "%s: map pages failed\n",
- __func__);
- else
- rc = security_kernel_post_load_data(fw_priv->data,
- fw_priv->size,
- LOADING_FIRMWARE, "blob");
-
- /*
- * Same logic as fw_load_abort, only the DONE bit
- * is ignored and we set ABORT only on failure.
- */
- if (rc) {
- fw_state_aborted(fw_priv);
- written = rc;
- } else {
- fw_state_done(fw_priv);
- }
- break;
- }
- fallthrough;
- default:
- dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
- fallthrough;
- case -1:
- fw_load_abort(fw_sysfs);
- break;
- }
-out:
- mutex_unlock(&fw_lock);
- return written;
-}
-
-static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-
-static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- if (read)
- memcpy(buffer, fw_priv->data + offset, count);
- else
- memcpy(fw_priv->data + offset, buffer, count);
-}
-
-static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE-1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(fw_priv->pages[page_nr]);
-
- if (read)
- memcpy(buffer, page_data + page_ofs, page_cnt);
- else
- memcpy(page_data + page_ofs, buffer, page_cnt);
-
- kunmap(fw_priv->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
-}
-
-static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t ret_count;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- ret_count = -ENODEV;
- goto out;
- }
- if (offset > fw_priv->size) {
- ret_count = 0;
- goto out;
- }
- if (count > fw_priv->size - offset)
- count = fw_priv->size - offset;
-
- ret_count = count;
-
- if (fw_priv->data)
- firmware_rw_data(fw_priv, buffer, offset, count, true);
- else
- firmware_rw(fw_priv, buffer, offset, count, true);
-
-out:
- mutex_unlock(&fw_lock);
- return ret_count;
-}
-
-static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
-{
- int err;
-
- err = fw_grow_paged_buf(fw_sysfs->fw_priv,
- PAGE_ALIGN(min_size) >> PAGE_SHIFT);
- if (err)
- fw_load_abort(fw_sysfs);
- return err;
-}
-
-/**
- * firmware_data_write() - write method for firmware
- * @filp: open sysfs file
- * @kobj: kobject for the device
- * @bin_attr: bin_attr structure
- * @buffer: buffer being written
- * @offset: buffer offset for write in total data store area
- * @count: buffer size
- *
- * Data written to the 'data' attribute will be later handed to
- * the driver as a firmware image.
- **/
-static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t retval;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- retval = -ENODEV;
- goto out;
- }
-
- if (fw_priv->data) {
- if (offset + count > fw_priv->allocated_size) {
- retval = -ENOMEM;
- goto out;
- }
- firmware_rw_data(fw_priv, buffer, offset, count, false);
- retval = count;
- } else {
- retval = fw_realloc_pages(fw_sysfs, offset + count);
- if (retval)
- goto out;
-
- retval = count;
- firmware_rw(fw_priv, buffer, offset, count, false);
- }
-
- fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
-out:
- mutex_unlock(&fw_lock);
- return retval;
-}
-
-static struct bin_attribute firmware_attr_data = {
- .attr = { .name = "data", .mode = 0644 },
- .size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
-};
-
-static struct attribute *fw_dev_attrs[] = {
- &dev_attr_loading.attr,
- NULL
-};
-
-static struct bin_attribute *fw_dev_bin_attrs[] = {
- &firmware_attr_data,
- NULL
-};
-
-static const struct attribute_group fw_dev_attr_group = {
- .attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
-};
-
-static const struct attribute_group *fw_dev_attr_groups[] = {
- &fw_dev_attr_group,
- NULL
-};
-
-static struct fw_sysfs *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, u32 opt_flags)
-{
- struct fw_sysfs *fw_sysfs;
- struct device *f_dev;
-
- fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
- if (!fw_sysfs) {
- fw_sysfs = ERR_PTR(-ENOMEM);
- goto exit;
- }
-
- fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
- fw_sysfs->fw = firmware;
- f_dev = &fw_sysfs->dev;
-
- device_initialize(f_dev);
- dev_set_name(f_dev, "%s", fw_name);
- f_dev->parent = device;
- f_dev->class = &firmware_class;
- f_dev->groups = fw_dev_attr_groups;
-exit:
- return fw_sysfs;
-}
-
/**
* fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
* @fw_sysfs: firmware sysfs information for the firmware to load
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 3af7205b302f..144148595660 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -6,29 +6,7 @@
#include <linux/device.h>
#include "firmware.h"
-
-/**
- * struct firmware_fallback_config - firmware fallback configuration settings
- *
- * Helps describe and fine tune the fallback mechanism.
- *
- * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
- * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
- * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
- * functionality on a kernel where that config entry has been disabled.
- * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
- * This emulates the behaviour as if we had set the kernel
- * config CONFIG_FW_LOADER_USER_HELPER=n.
- * @old_timeout: for internal use
- * @loading_timeout: the timeout to wait for the fallback mechanism before
- * giving up, in seconds.
- */
-struct firmware_fallback_config {
- unsigned int force_sysfs_fallback;
- unsigned int ignore_sysfs_fallback;
- int old_timeout;
- int loading_timeout;
-};
+#include "sysfs.h"
#ifdef CONFIG_FW_LOADER_USER_HELPER
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
@@ -40,8 +18,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom);
void fw_fallback_set_cache_timeout(void);
void fw_fallback_set_default_timeout(void);
-int register_sysfs_loader(void);
-void unregister_sysfs_loader(void);
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
@@ -55,15 +31,6 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
static inline void fw_fallback_set_cache_timeout(void) { }
static inline void fw_fallback_set_default_timeout(void) { }
-
-static inline int register_sysfs_loader(void)
-{
- return 0;
-}
-
-static inline void unregister_sysfs_loader(void)
-{
-}
#endif /* CONFIG_FW_LOADER_USER_HELPER */
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 46a731dede6f..e5ac098d0742 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -4,6 +4,7 @@
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
@@ -24,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = {
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
#ifdef CONFIG_SYSCTL
-struct ctl_table firmware_config_table[] = {
+static struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
@@ -45,4 +46,24 @@ struct ctl_table firmware_config_table[] = {
},
{ }
};
-#endif
+
+static struct ctl_table_header *firmware_config_sysct_table_header;
+int register_firmware_config_sysctl(void)
+{
+ firmware_config_sysct_table_header =
+ register_sysctl("kernel/firmware_config",
+ firmware_config_table);
+ if (!firmware_config_sysct_table_header)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+void unregister_firmware_config_sysctl(void)
+{
+ unregister_sysctl_table(firmware_config_sysct_table_header);
+ firmware_config_sysct_table_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 2889f446ad41..fe77e91c38a2 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -87,6 +87,7 @@ struct fw_priv {
};
extern struct mutex fw_lock;
+extern struct firmware_cache fw_cache;
static inline bool __fw_state_check(struct fw_priv *fw_priv,
enum fw_status status)
@@ -149,7 +150,22 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
__fw_state_set(fw_priv, FW_STATUS_DONE);
}
+static inline bool fw_state_is_done(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_state_is_loading(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
+
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags);
int assign_fw(struct firmware *fw, struct device *device);
+void free_fw_priv(struct fw_priv *fw_priv);
+void fw_state_init(struct fw_priv *fw_priv);
#ifdef CONFIG_FW_LOADER
bool firmware_is_builtin(const struct firmware *fw);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 94d1789a233e..7c3590fd97c2 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -35,6 +35,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/zstd.h>
#include <linux/xz.h>
#include <generated/utsrelease.h>
@@ -91,9 +92,9 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
* guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);
-static struct firmware_cache fw_cache;
+struct firmware_cache fw_cache;
-static void fw_state_init(struct fw_priv *fw_priv)
+void fw_state_init(struct fw_priv *fw_priv)
{
struct fw_state *fw_st = &fw_priv->fw_st;
@@ -163,13 +164,9 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
}
/* Returns 1 for batching firmware requests with the same name */
-static int alloc_lookup_fw_priv(const char *fw_name,
- struct firmware_cache *fwc,
- struct fw_priv **fw_priv,
- void *dbuf,
- size_t size,
- size_t offset,
- u32 opt_flags)
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags)
{
struct fw_priv *tmp;
@@ -224,7 +221,7 @@ static void __free_fw_priv(struct kref *ref)
kfree(fw_priv);
}
-static void free_fw_priv(struct fw_priv *fw_priv)
+void free_fw_priv(struct fw_priv *fw_priv)
{
struct firmware_cache *fwc = fw_priv->fwc;
spin_lock(&fwc->lock);
@@ -253,6 +250,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
+ fw_priv->data = NULL;
+ fw_priv->size = 0;
}
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
@@ -305,9 +304,73 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
#endif
/*
+ * ZSTD-compressed firmware support
+ */
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ size_t len, out_size, workspace_size;
+ void *workspace, *out_buf;
+ zstd_dctx *ctx;
+ int err;
+
+ if (fw_priv->allocated_size) {
+ out_size = fw_priv->allocated_size;
+ out_buf = fw_priv->data;
+ } else {
+ zstd_frame_header params;
+
+ if (zstd_get_frame_header(&params, in_buffer, in_size) ||
+ params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
+ dev_dbg(dev, "%s: invalid zstd header\n", __func__);
+ return -EINVAL;
+ }
+ out_size = params.frameContentSize;
+ out_buf = vzalloc(out_size);
+ if (!out_buf)
+ return -ENOMEM;
+ }
+
+ workspace_size = zstd_dctx_workspace_bound();
+ workspace = kvzalloc(workspace_size, GFP_KERNEL);
+ if (!workspace) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ctx = zstd_init_dctx(workspace, workspace_size);
+ if (!ctx) {
+ dev_dbg(dev, "%s: failed to initialize context\n", __func__);
+ err = -EINVAL;
+ goto error;
+ }
+
+ len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
+ if (zstd_is_error(len)) {
+ dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
+ zstd_get_error_code(len));
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!fw_priv->allocated_size)
+ fw_priv->data = out_buf;
+ fw_priv->size = len;
+ err = 0;
+
+ error:
+ kvfree(workspace);
+ if (err && !fw_priv->allocated_size)
+ vfree(out_buf);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
+
+/*
* XZ-compressed firmware support
*/
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
/* show an error and return the standard error code */
static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
{
@@ -372,11 +435,11 @@ static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
/* decompress onto the new allocated page */
page = fw_priv->pages[fw_priv->nr_pages - 1];
- xz_buf.out = kmap(page);
+ xz_buf.out = kmap_local_page(page);
xz_buf.out_pos = 0;
xz_buf.out_size = PAGE_SIZE;
xz_ret = xz_dec_run(xz_dec, &xz_buf);
- kunmap(page);
+ kunmap_local(xz_buf.out);
fw_priv->size += xz_buf.out_pos;
/* partial decompression means either end or error */
if (xz_buf.out_pos != PAGE_SIZE)
@@ -401,7 +464,7 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
else
return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
}
-#endif /* CONFIG_FW_LOADER_COMPRESS */
+#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
/* direct firmware loading support */
static char fw_path_para[256];
@@ -735,6 +798,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
+ struct cred *kern_cred = NULL;
+ const struct cred *old_cred;
bool nondirect = false;
int ret;
@@ -751,13 +816,30 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
+ /*
+ * We are about to try to access the firmware file. Because we may have been
+ * called by a driver when serving an unrelated request from userland, we use
+ * the kernel credentials to read the file.
+ */
+ kern_cred = prepare_kernel_cred(NULL);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
+
ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
/* Only full reads can support decompression, platform, and sysfs. */
if (!(opt_flags & FW_OPT_PARTIAL))
nondirect = true;
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+ fw_decompress_zstd);
+#endif
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
if (ret == -ENOENT && nondirect)
ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
fw_decompress_xz);
@@ -776,6 +858,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
} else
ret = assign_fw(fw, device);
+ revert_creds(old_cred);
+ put_cred(kern_cred);
+
out:
if (ret < 0) {
fw_abort_batch_reqs(fw);
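From a driver's point of view nothing changes with the new ZSTD path: request_firmware() tries the plain file first, then the .zst and .xz variants when the corresponding config options are enabled. A hedged sketch ("example-fw.bin" is illustrative):

	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example-fw.bin", dev);
	if (err)
		return err;	/* also covers missing .zst/.xz variants */
	/* fw->data and fw->size hold the decompressed image either way */
	release_firmware(fw);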
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
new file mode 100644
index 000000000000..5b66b3d1fa16
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "sysfs.h"
+
+/*
+ * sysfs support for firmware loader
+ */
+
+void __fw_load_abort(struct fw_priv *fw_priv)
+{
+ /*
+ * There is a small window in which the user can write to 'loading'
+ * between loading done/aborted and disappearance of 'loading'
+ */
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ return;
+
+ fw_state_aborted(fw_priv);
+}
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
+}
+
+/**
+ * timeout_store() - set number of seconds to wait for firmware
+ * @class: device class pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for timeout value
+ * @count: number of bytes in @buf
+ *
+ * Sets the number of seconds to wait for the firmware. Once
+ * this expires, an error will be returned to the driver and no
+ * firmware will be provided.
+ *
+ * Note: zero means 'wait forever'.
+ **/
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+
+ if (tmp_loading_timeout < 0)
+ tmp_loading_timeout = 0;
+
+ __fw_fallback_set_timeout(tmp_loading_timeout);
+
+ return count;
+}
+static CLASS_ATTR_RW(timeout);
+
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(firmware_class);
+
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
+ return -ENOMEM;
+ if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
+ return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ err = do_firmware_uevent(fw_sysfs, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+static void fw_dev_release(struct device *dev)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+ if (fw_sysfs->fw_upload_priv)
+ fw_upload_free(fw_sysfs);
+
+ kfree(fw_sysfs);
+}
+
+static struct class firmware_class = {
+ .name = "firmware",
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ .class_groups = firmware_class_groups,
+ .dev_uevent = firmware_uevent,
+#endif
+ .dev_release = fw_dev_release,
+};
+
+int register_sysfs_loader(void)
+{
+ int ret = class_register(&firmware_class);
+
+ if (ret != 0)
+ return ret;
+ return register_firmware_config_sysctl();
+}
+
+void unregister_sysfs_loader(void)
+{
+ unregister_firmware_config_sysctl();
+ class_unregister(&firmware_class);
+}
+
+static ssize_t firmware_loading_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int loading = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ loading = fw_state_is_loading(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+
+ return sysfs_emit(buf, "%d\n", loading);
+}
+
+/**
+ * firmware_loading_store() - set value in the 'loading' control file
+ * @dev: device pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for loading control value
+ * @count: number of bytes in @buf
+ *
+ * The relevant values are:
+ *
+ * 1: Start a load, discarding any previous partial load.
+ * 0: Conclude the load and hand the data to the driver code.
+ * -1: Conclude the load with an error and discard any written data.
+ **/
+static ssize_t firmware_loading_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t written = count;
+ int loading = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ goto out;
+
+ switch (loading) {
+ case 1:
+ /* discarding any previous partial load */
+ fw_free_paged_buf(fw_priv);
+ fw_state_start(fw_priv);
+ break;
+ case 0:
+ if (fw_state_is_loading(fw_priv)) {
+ int rc;
+
+ /*
+ * Several loading requests may be pending on
+ * the same firmware buffer, so let all requests
+ * see the mapped 'buf->data' once the loading
+ * is completed.
+ */
+ rc = fw_map_paged_buf(fw_priv);
+ if (rc)
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
+ else
+ rc = security_kernel_post_load_data(fw_priv->data,
+ fw_priv->size,
+ LOADING_FIRMWARE,
+ "blob");
+
+ /*
+ * Same logic as fw_load_abort, only the DONE bit
+ * is ignored and we set ABORT only on failure.
+ */
+ if (rc) {
+ fw_state_aborted(fw_priv);
+ written = rc;
+ } else {
+ fw_state_done(fw_priv);
+
+ /*
+ * If this is a user-initiated firmware upload
+ * then start the upload in a worker thread now.
+ */
+ rc = fw_upload_start(fw_sysfs);
+ if (rc)
+ written = rc;
+ }
+ break;
+ }
+ fallthrough;
+ default:
+ dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
+ fallthrough;
+ case -1:
+ fw_load_abort(fw_sysfs);
+ if (fw_sysfs->fw_upload_priv)
+ fw_state_init(fw_sysfs->fw_priv);
+
+ break;
+ }
+out:
+ mutex_unlock(&fw_lock);
+ return written;
+}
+
+DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ if (read)
+ memcpy(buffer, fw_priv->data + offset, count);
+ else
+ memcpy(fw_priv->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ while (count) {
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE - 1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ if (read)
+ memcpy_from_page(buffer, fw_priv->pages[page_nr],
+ page_ofs, page_cnt);
+ else
+ memcpy_to_page(fw_priv->pages[page_nr], page_ofs,
+ buffer, page_cnt);
+
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+}
+
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t ret_count;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ ret_count = -ENODEV;
+ goto out;
+ }
+ if (offset > fw_priv->size) {
+ ret_count = 0;
+ goto out;
+ }
+ if (count > fw_priv->size - offset)
+ count = fw_priv->size - offset;
+
+ ret_count = count;
+
+ if (fw_priv->data)
+ firmware_rw_data(fw_priv, buffer, offset, count, true);
+ else
+ firmware_rw(fw_priv, buffer, offset, count, true);
+
+out:
+ mutex_unlock(&fw_lock);
+ return ret_count;
+}
+
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
+{
+ int err;
+
+ err = fw_grow_paged_buf(fw_sysfs->fw_priv,
+ PAGE_ALIGN(min_size) >> PAGE_SHIFT);
+ if (err)
+ fw_load_abort(fw_sysfs);
+ return err;
+}
+
+/**
+ * firmware_data_write() - write method for firmware
+ * @filp: open sysfs file
+ * @kobj: kobject for the device
+ * @bin_attr: bin_attr structure
+ * @buffer: buffer being written
+ * @offset: buffer offset for write in total data store area
+ * @count: buffer size
+ *
+ * Data written to the 'data' attribute will be later handed to
+ * the driver as a firmware image.
+ **/
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t retval;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (fw_priv->data) {
+ if (offset + count > fw_priv->allocated_size) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ firmware_rw_data(fw_priv, buffer, offset, count, false);
+ retval = count;
+ } else {
+ retval = fw_realloc_pages(fw_sysfs, offset + count);
+ if (retval)
+ goto out;
+
+ retval = count;
+ firmware_rw(fw_priv, buffer, offset, count, false);
+ }
+
+ fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
+out:
+ mutex_unlock(&fw_lock);
+ return retval;
+}
+
+static struct bin_attribute firmware_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
+ .size = 0,
+ .read = firmware_data_read,
+ .write = firmware_data_write,
+};
+
+static struct attribute *fw_dev_attrs[] = {
+ &dev_attr_loading.attr,
+#ifdef CONFIG_FW_UPLOAD
+ &dev_attr_cancel.attr,
+ &dev_attr_status.attr,
+ &dev_attr_error.attr,
+ &dev_attr_remaining_size.attr,
+#endif
+ NULL
+};
+
+static struct bin_attribute *fw_dev_bin_attrs[] = {
+ &firmware_attr_data,
+ NULL
+};
+
+static const struct attribute_group fw_dev_attr_group = {
+ .attrs = fw_dev_attrs,
+ .bin_attrs = fw_dev_bin_attrs,
+#ifdef CONFIG_FW_UPLOAD
+ .is_visible = fw_upload_is_visible,
+#endif
+};
+
+static const struct attribute_group *fw_dev_attr_groups[] = {
+ &fw_dev_attr_group,
+ NULL
+};
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags)
+{
+ struct fw_sysfs *fw_sysfs;
+ struct device *f_dev;
+
+ fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+ if (!fw_sysfs) {
+ fw_sysfs = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+ fw_sysfs->fw = firmware;
+ f_dev = &fw_sysfs->dev;
+
+ device_initialize(f_dev);
+ dev_set_name(f_dev, "%s", fw_name);
+ f_dev->parent = device;
+ f_dev->class = &firmware_class;
+ f_dev->groups = fw_dev_attr_groups;
+exit:
+ return fw_sysfs;
+}
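The loading/data pair above is the long-standing firmware fallback protocol (1 = start, 0 = conclude, -1 = abort). A hedged userspace sketch; "example-fw" and the minimal error handling are purely illustrative:

#include <fcntl.h>
#include <unistd.h>

static int feed_firmware(const void *blob, size_t len)
{
	int lfd = open("/sys/class/firmware/example-fw/loading", O_WRONLY);
	int dfd = open("/sys/class/firmware/example-fw/data", O_WRONLY);

	if (lfd < 0 || dfd < 0) {
		if (lfd >= 0)
			close(lfd);
		if (dfd >= 0)
			close(dfd);
		return -1;
	}

	(void)write(lfd, "1", 1);	/* start, discard any partial load */
	(void)write(dfd, blob, len);	/* the firmware image itself */
	(void)write(lfd, "0", 1);	/* conclude, hand data to the driver */

	close(dfd);
	close(lfd);
	return 0;
}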
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
new file mode 100644
index 000000000000..df1d5add698f
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_SYSFS_H
+#define __FIRMWARE_SYSFS_H
+
+#include <linux/device.h>
+
+#include "firmware.h"
+
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
+extern struct firmware_fallback_config fw_fallback_config;
+extern struct device_attribute dev_attr_loading;
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * struct firmware_fallback_config - firmware fallback configuration settings
+ *
+ * Helps describe and fine tune the fallback mechanism.
+ *
+ * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
+ * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
+ * Useful to help debug the CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+ * functionality on a kernel where that config entry has been disabled.
+ * @ignore_sysfs_fallback: force-disable the sysfs fallback mechanism.
+ * This emulates the behaviour as if we had set the kernel
+ * config CONFIG_FW_LOADER_USER_HELPER=n.
+ * @old_timeout: for internal use
+ * @loading_timeout: the timeout to wait for the fallback mechanism before
+ * giving up, in seconds.
+ */
+struct firmware_fallback_config {
+ unsigned int force_sysfs_fallback;
+ unsigned int ignore_sysfs_fallback;
+ int old_timeout;
+ int loading_timeout;
+};
+
+/* These getters are vetted to use int properly */
+static inline int __firmware_loading_timeout(void)
+{
+ return fw_fallback_config.loading_timeout;
+}
+
+/* These setters are vetted to use int properly */
+static inline void __fw_fallback_set_timeout(int timeout)
+{
+ fw_fallback_config.loading_timeout = timeout;
+}
+#endif
+
+#ifdef CONFIG_FW_LOADER_SYSFS
+int register_sysfs_loader(void);
+void unregister_sysfs_loader(void);
+#if defined(CONFIG_FW_LOADER_USER_HELPER) && defined(CONFIG_SYSCTL)
+int register_firmware_config_sysctl(void);
+void unregister_firmware_config_sysctl(void);
+#else
+static inline int register_firmware_config_sysctl(void)
+{
+ return 0;
+}
+
+static inline void unregister_firmware_config_sysctl(void) { }
+#endif /* CONFIG_FW_LOADER_USER_HELPER && CONFIG_SYSCTL */
+#else /* CONFIG_FW_LOADER_SYSFS */
+static inline int register_sysfs_loader(void)
+{
+ return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+#endif /* CONFIG_FW_LOADER_SYSFS */
+
+struct fw_sysfs {
+ bool nowait;
+ struct device dev;
+ struct fw_priv *fw_priv;
+ struct firmware *fw;
+ void *fw_upload_priv;
+};
+
+static inline struct fw_sysfs *to_fw_sysfs(struct device *dev)
+{
+ return container_of(dev, struct fw_sysfs, dev);
+}
+
+void __fw_load_abort(struct fw_priv *fw_priv);
+
+static inline void fw_load_abort(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+ __fw_load_abort(fw_priv);
+}
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags);
+
+#ifdef CONFIG_FW_UPLOAD
+extern struct device_attribute dev_attr_status;
+extern struct device_attribute dev_attr_error;
+extern struct device_attribute dev_attr_cancel;
+extern struct device_attribute dev_attr_remaining_size;
+
+int fw_upload_start(struct fw_sysfs *fw_sysfs);
+void fw_upload_free(struct fw_sysfs *fw_sysfs);
+umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+#else
+static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ return 0;
+}
+
+static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+}
+#endif
+
+#endif /* __FIRMWARE_SYSFS_H */
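The CONFIG_FW_UPLOAD stubs above keep common code branch-free: callers invoke the upload hooks unconditionally and the static inlines compile away when the option is off, as in this fragment mirroring firmware_loading_store() in sysfs.c:

	rc = fw_upload_start(fw_sysfs);	/* static-inline 0 when CONFIG_FW_UPLOAD=n */
	if (rc)
		written = rc;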
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
new file mode 100644
index 000000000000..a0af8f5f13d8
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "sysfs_upload.h"
+
+/*
+ * Support for user-space to initiate a firmware upload to a device.
+ */
+
+static const char * const fw_upload_prog_str[] = {
+ [FW_UPLOAD_PROG_IDLE] = "idle",
+ [FW_UPLOAD_PROG_RECEIVING] = "receiving",
+ [FW_UPLOAD_PROG_PREPARING] = "preparing",
+ [FW_UPLOAD_PROG_TRANSFERRING] = "transferring",
+ [FW_UPLOAD_PROG_PROGRAMMING] = "programming"
+};
+
+static const char * const fw_upload_err_str[] = {
+ [FW_UPLOAD_ERR_NONE] = "none",
+ [FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
+ [FW_UPLOAD_ERR_TIMEOUT] = "timeout",
+ [FW_UPLOAD_ERR_CANCELED] = "user-abort",
+ [FW_UPLOAD_ERR_BUSY] = "device-busy",
+ [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+ [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
+ [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+};
+
+static const char *fw_upload_progress(struct device *dev,
+ enum fw_upload_prog prog)
+{
+ const char *status = "unknown-status";
+
+ if (prog < FW_UPLOAD_PROG_MAX)
+ status = fw_upload_prog_str[prog];
+ else
+ dev_err(dev, "Invalid status during secure update: %d\n", prog);
+
+ return status;
+}
+
+static const char *fw_upload_error(struct device *dev,
+ enum fw_upload_err err_code)
+{
+ const char *error = "unknown-error";
+
+ if (err_code < FW_UPLOAD_ERR_MAX)
+ error = fw_upload_err_str[err_code];
+ else
+ dev_err(dev, "Invalid error code during secure update: %d\n",
+ err_code);
+
+ return error;
+}
+
+static ssize_t
+status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%s\n", fw_upload_progress(dev, fwlp->progress));
+}
+DEVICE_ATTR_RO(status);
+
+static ssize_t
+error_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret;
+
+ mutex_lock(&fwlp->lock);
+
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE)
+ ret = -EBUSY;
+ else if (!fwlp->err_code)
+ ret = 0;
+ else
+ ret = sysfs_emit(buf, "%s:%s\n",
+ fw_upload_progress(dev, fwlp->err_progress),
+ fw_upload_error(dev, fwlp->err_code));
+
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_RO(error);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret = count;
+ bool cancel;
+
+ if (kstrtobool(buf, &cancel) || !cancel)
+ return -EINVAL;
+
+ mutex_lock(&fwlp->lock);
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
+ ret = -ENODEV;
+
+ fwlp->ops->cancel(fwlp->fw_upload);
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_WO(cancel);
+
+static ssize_t remaining_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%u\n", fwlp->remaining_size);
+}
+DEVICE_ATTR_RO(remaining_size);
+
+umode_t
+fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+	struct fw_sysfs *fw_sysfs;
+
+ fw_sysfs = to_fw_sysfs(kobj_to_dev(kobj));
+
+ if (fw_sysfs->fw_upload_priv || attr == &dev_attr_loading.attr)
+ return attr->mode;
+
+ return 0;
+}
+
+static void fw_upload_update_progress(struct fw_upload_priv *fwlp,
+ enum fw_upload_prog new_progress)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = new_progress;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_set_error(struct fw_upload_priv *fwlp,
+ enum fw_upload_err err_code)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->err_progress = fwlp->progress;
+ fwlp->err_code = err_code;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_prog_complete(struct fw_upload_priv *fwlp)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = FW_UPLOAD_PROG_IDLE;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_main(struct work_struct *work)
+{
+ struct fw_upload_priv *fwlp;
+ struct fw_sysfs *fw_sysfs;
+ u32 written = 0, offset = 0;
+ enum fw_upload_err ret;
+ struct device *fw_dev;
+ struct fw_upload *fwl;
+
+ fwlp = container_of(work, struct fw_upload_priv, work);
+ fwl = fwlp->fw_upload;
+ fw_sysfs = (struct fw_sysfs *)fwl->priv;
+ fw_dev = &fw_sysfs->dev;
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PREPARING);
+ ret = fwlp->ops->prepare(fwl, fwlp->data, fwlp->remaining_size);
+ if (ret != FW_UPLOAD_ERR_NONE) {
+ fw_upload_set_error(fwlp, ret);
+ goto putdev_exit;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_TRANSFERRING);
+ while (fwlp->remaining_size) {
+ ret = fwlp->ops->write(fwl, fwlp->data, offset,
+ fwlp->remaining_size, &written);
+ if (ret != FW_UPLOAD_ERR_NONE || !written) {
+ if (ret == FW_UPLOAD_ERR_NONE) {
+ dev_warn(fw_dev, "write-op wrote zero data\n");
+ ret = FW_UPLOAD_ERR_RW_ERROR;
+ }
+ fw_upload_set_error(fwlp, ret);
+ goto done;
+ }
+
+ fwlp->remaining_size -= written;
+ offset += written;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PROGRAMMING);
+ ret = fwlp->ops->poll_complete(fwl);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ fw_upload_set_error(fwlp, ret);
+
+done:
+ if (fwlp->ops->cleanup)
+ fwlp->ops->cleanup(fwl);
+
+putdev_exit:
+ put_device(fw_dev->parent);
+
+ /*
+ * Note: fwlp->remaining_size is left unmodified here to provide
+ * additional information on errors. It will be reinitialized when
+	 * the next firmware upload begins.
+ */
+ mutex_lock(&fw_lock);
+ fw_free_paged_buf(fw_sysfs->fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+ fwlp->data = NULL;
+ fw_upload_prog_complete(fwlp);
+}
+
+/*
+ * Start a worker thread to upload data to the parent driver.
+ * Must be called with fw_lock held.
+ */
+int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+ struct device *fw_dev = &fw_sysfs->dev;
+ struct fw_upload_priv *fwlp;
+
+ if (!fw_sysfs->fw_upload_priv)
+ return 0;
+
+ if (!fw_priv->size) {
+ fw_free_paged_buf(fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ return 0;
+ }
+
+ fwlp = fw_sysfs->fw_upload_priv;
+ mutex_lock(&fwlp->lock);
+
+ /* Do not interfere with an on-going fw_upload */
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -EBUSY;
+ }
+
+ get_device(fw_dev->parent); /* released in fw_upload_main */
+
+ fwlp->progress = FW_UPLOAD_PROG_RECEIVING;
+ fwlp->err_code = 0;
+ fwlp->remaining_size = fw_priv->size;
+ fwlp->data = fw_priv->data;
+
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name,
+ fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
+
+ queue_work(system_long_wq, &fwlp->work);
+ mutex_unlock(&fwlp->lock);
+
+ return 0;
+}
+
+void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_upload_priv->fw_upload);
+ kfree(fw_upload_priv);
+}
+
+/**
+ * firmware_upload_register() - register for the firmware upload sysfs API
+ * @module: kernel module of this device
+ * @parent: parent device instantiating firmware upload
+ * @name: firmware name to be associated with this device
+ * @ops: pointer to structure of firmware upload ops
+ * @dd_handle: pointer to parent driver private data
+ *
+ * @name must be unique among all users of firmware upload. The firmware
+ * sysfs files for this device will be found at /sys/class/firmware/@name.
+ *
+ * Return: struct fw_upload pointer or ERR_PTR()
+ *
+ **/
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ u32 opt_flags = FW_OPT_NOCACHE;
+ struct fw_upload *fw_upload;
+ struct fw_upload_priv *fw_upload_priv;
+ struct fw_sysfs *fw_sysfs;
+ struct fw_priv *fw_priv;
+ struct device *fw_dev;
+ int ret;
+
+ if (!name || name[0] == '\0')
+ return ERR_PTR(-EINVAL);
+
+ if (!ops || !ops->cancel || !ops->prepare ||
+ !ops->write || !ops->poll_complete) {
+ dev_err(parent, "Attempt to register without all required ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!try_module_get(module))
+ return ERR_PTR(-EFAULT);
+
+ fw_upload = kzalloc(sizeof(*fw_upload), GFP_KERNEL);
+ if (!fw_upload) {
+ ret = -ENOMEM;
+ goto exit_module_put;
+ }
+
+ fw_upload_priv = kzalloc(sizeof(*fw_upload_priv), GFP_KERNEL);
+ if (!fw_upload_priv) {
+ ret = -ENOMEM;
+ goto free_fw_upload;
+ }
+
+ fw_upload_priv->fw_upload = fw_upload;
+ fw_upload_priv->ops = ops;
+ mutex_init(&fw_upload_priv->lock);
+ fw_upload_priv->module = module;
+ fw_upload_priv->name = name;
+ fw_upload_priv->err_code = 0;
+ fw_upload_priv->progress = FW_UPLOAD_PROG_IDLE;
+ INIT_WORK(&fw_upload_priv->work, fw_upload_main);
+ fw_upload->dd_handle = dd_handle;
+
+ fw_sysfs = fw_create_instance(NULL, name, parent, opt_flags);
+ if (IS_ERR(fw_sysfs)) {
+ ret = PTR_ERR(fw_sysfs);
+ goto free_fw_upload_priv;
+ }
+ fw_upload->priv = fw_sysfs;
+ fw_sysfs->fw_upload_priv = fw_upload_priv;
+ fw_dev = &fw_sysfs->dev;
+
+ ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, NULL, 0, 0,
+ FW_OPT_NOCACHE);
+ if (ret != 0) {
+ if (ret > 0)
+ ret = -EINVAL;
+ goto free_fw_sysfs;
+ }
+ fw_priv->is_paged_buf = true;
+ fw_sysfs->fw_priv = fw_priv;
+
+ ret = device_add(fw_dev);
+ if (ret) {
+ dev_err(fw_dev, "%s: device_register failed\n", __func__);
+ put_device(fw_dev);
+ goto exit_module_put;
+ }
+
+ return fw_upload;
+
+free_fw_sysfs:
+ kfree(fw_sysfs);
+
+free_fw_upload_priv:
+ kfree(fw_upload_priv);
+
+free_fw_upload:
+ kfree(fw_upload);
+
+exit_module_put:
+ module_put(module);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_register);
+
+/**
+ * firmware_upload_unregister() - Unregister firmware upload interface
+ * @fw_upload: pointer to struct fw_upload
+ **/
+void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+ struct fw_sysfs *fw_sysfs = fw_upload->priv;
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+ struct module *module = fw_upload_priv->module;
+
+ mutex_lock(&fw_upload_priv->lock);
+ if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fw_upload_priv->lock);
+ goto unregister;
+ }
+
+ fw_upload_priv->ops->cancel(fw_upload);
+ mutex_unlock(&fw_upload_priv->lock);
+
+ /* Ensure lower-level device-driver is finished */
+ flush_work(&fw_upload_priv->work);
+
+unregister:
+ device_unregister(&fw_sysfs->dev);
+ module_put(module);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_unregister);
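For drivers adopting the interface, registration looks roughly like the sketch below. Every example_* symbol is hypothetical driver code; the four mandatory ops mirror the check in firmware_upload_register() above, and .cleanup remains optional:

static const struct fw_upload_ops example_ops = {
	.prepare	= example_prepare,	/* mandatory */
	.write		= example_write,	/* mandatory */
	.poll_complete	= example_poll_complete,	/* mandatory */
	.cancel		= example_cancel,	/* mandatory */
	/* .cleanup is optional */
};

static int example_probe(struct platform_device *pdev)
{
	struct fw_upload *fwl;

	fwl = firmware_upload_register(THIS_MODULE, &pdev->dev, "example-fw",
				       &example_ops, NULL);
	if (IS_ERR(fwl))
		return PTR_ERR(fwl);
	/* stash fwl; call firmware_upload_unregister(fwl) on remove */
	return 0;
}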
diff --git a/drivers/base/firmware_loader/sysfs_upload.h b/drivers/base/firmware_loader/sysfs_upload.h
new file mode 100644
index 000000000000..31931ff7808a
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SYSFS_UPLOAD_H
+#define __SYSFS_UPLOAD_H
+
+#include <linux/device.h>
+
+#include "sysfs.h"
+
+/**
+ * enum fw_upload_prog - firmware upload progress codes
+ * @FW_UPLOAD_PROG_IDLE: there is no firmware upload in progress
+ * @FW_UPLOAD_PROG_RECEIVING: worker thread is receiving firmware data
+ * @FW_UPLOAD_PROG_PREPARING: target device is preparing for firmware upload
+ * @FW_UPLOAD_PROG_TRANSFERRING: data is being copied to the device
+ * @FW_UPLOAD_PROG_PROGRAMMING: device is performing the firmware update
+ * @FW_UPLOAD_PROG_MAX: Maximum progress code marker
+ */
+enum fw_upload_prog {
+ FW_UPLOAD_PROG_IDLE,
+ FW_UPLOAD_PROG_RECEIVING,
+ FW_UPLOAD_PROG_PREPARING,
+ FW_UPLOAD_PROG_TRANSFERRING,
+ FW_UPLOAD_PROG_PROGRAMMING,
+ FW_UPLOAD_PROG_MAX
+};
+
+struct fw_upload_priv {
+ struct fw_upload *fw_upload;
+ struct module *module;
+ const char *name;
+ const struct fw_upload_ops *ops;
+ struct mutex lock; /* protect data structure contents */
+ struct work_struct work;
+ const u8 *data; /* pointer to update data */
+ u32 remaining_size; /* size remaining to transfer */
+ enum fw_upload_prog progress;
+ enum fw_upload_prog err_progress; /* progress at time of failure */
+ enum fw_upload_err err_code; /* security manager error code */
+};
+
+#endif /* __SYSFS_UPLOAD_H */
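The err_progress/err_code pair stored here is what error_show() in sysfs_upload.c renders, in the form <progress-at-failure>:<error-string>. A hedged example of the observable behaviour (device name illustrative):

/*
 * After, say, prepare() returns FW_UPLOAD_ERR_HW_ERROR:
 *
 *   $ cat /sys/class/firmware/example-fw/error
 *   preparing:hw-error
 *
 * while an upload is still in progress the read returns -EBUSY instead.
 */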
diff --git a/drivers/base/init.c b/drivers/base/init.c
index a9f57c22fb9e..397eb9880cec 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/of.h>
+#include <linux/backing-dev.h>
#include "base.h"
@@ -20,6 +21,7 @@
void __init driver_init(void)
{
/* These are the core pieces */
+ bdi_init(&noop_backing_dev_info);
devtmpfs_init();
devices_init();
buses_init();
@@ -35,5 +37,6 @@ void __init driver_init(void)
auxiliary_bus_init();
cpu_dev_init();
memory_dev_init();
+ node_dev_init();
container_dev_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 365cd4a7f239..9aa0da991cfb 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -215,6 +215,7 @@ static int memory_block_online(struct memory_block *mem)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
nr_vmemmap_pages);
+ mem->zone = zone;
return ret;
}
@@ -225,6 +226,9 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
int ret;
+ if (!mem->zone)
+ return -EINVAL;
+
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
@@ -234,7 +238,7 @@ static int memory_block_offline(struct memory_block *mem)
-nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, mem->group);
+ nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
@@ -246,6 +250,7 @@ static int memory_block_offline(struct memory_block *mem)
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+ mem->zone = NULL;
return ret;
}
@@ -411,11 +416,10 @@ static ssize_t valid_zones_show(struct device *dev,
*/
if (mem->state == MEM_ONLINE) {
/*
- * The block contains more than one zone can not be offlined.
- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+ * If !mem->zone, the memory block spans multiple zones and
+ * cannot get offlined.
*/
- default_zone = test_pages_in_a_zone(start_pfn,
- start_pfn + nr_pages);
+ default_zone = mem->zone;
if (!default_zone)
return sysfs_emit(buf, "%s\n", "none");
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
@@ -554,7 +558,9 @@ static ssize_t hard_offline_page_store(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, 0);
+ ret = memory_failure(pfn, MF_SW_SIMULATED);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
return ret ? ret : count;
}
@@ -613,11 +619,7 @@ static const struct attribute_group *memory_memblk_attr_groups[] = {
NULL,
};
-/*
- * register_memory - Setup a sysfs device for a memory block
- */
-static
-int register_memory(struct memory_block *memory)
+static int __add_memory_block(struct memory_block *memory)
{
int ret;
@@ -634,16 +636,91 @@ int register_memory(struct memory_block *memory)
}
ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
GFP_KERNEL));
- if (ret) {
- put_device(&memory->dev);
+ if (ret)
device_unregister(&memory->dev);
- }
+
return ret;
}
-static int init_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages,
- struct memory_group *group)
+static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
+ int nid)
+{
+ const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct zone *zone, *matching_zone = NULL;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int i;
+
+ /*
+ * This logic only works for early memory, when the applicable zones
+ * already span the memory block. We don't expect overlapping zones on
+ * a single node for early memory. So if we're told that some PFNs
+ * of a node fall into this memory block, we can assume that all node
+ * zones that intersect with the memory block are actually applicable.
+ * No need to look at the memmap.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (!populated_zone(zone))
+ continue;
+ if (!zone_intersects(zone, start_pfn, nr_pages))
+ continue;
+ if (!matching_zone) {
+ matching_zone = zone;
+ continue;
+ }
+ /* Spans multiple zones ... */
+ matching_zone = NULL;
+ break;
+ }
+ return matching_zone;
+}
+
+#ifdef CONFIG_NUMA
+/**
+ * memory_block_add_nid() - Indicate that system RAM falling into this memory
+ * block device (partially) belongs to the given node.
+ * @mem: The memory block device.
+ * @nid: The node id.
+ * @context: The memory initialization context.
+ *
+ * Indicate that system RAM falling into this memory block (partially) belongs
+ * to the given node. If the context indicates ("early") that we are adding the
+ * node during node device subsystem initialization, this will also properly
+ * set/adjust mem->zone based on the zone ranges of the given node.
+ */
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context)
+{
+ if (context == MEMINIT_EARLY && mem->nid != nid) {
+ /*
+ * For early memory we have to determine the zone when setting
+ * the node id and handle multiple nodes spanning a single
+	 * memory block by indicating via zone == NULL that we're not
+ * dealing with a single zone. So if we're setting the node id
+ * the first time, determine if there is a single zone. If we're
+ * setting the node id a second time to a different node,
+ * invalidate the single detected zone.
+ */
+ if (mem->nid == NUMA_NO_NODE)
+ mem->zone = early_node_zone_for_memory_block(mem, nid);
+ else
+ mem->zone = NULL;
+ }
+
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+	 * and, consequently, unplugging.
+ */
+ mem->nid = nid;
+}
+#endif
+
+static int add_memory_block(unsigned long block_id, unsigned long state,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
@@ -663,17 +740,30 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
mem->nr_vmemmap_pages = nr_vmemmap_pages;
INIT_LIST_HEAD(&mem->group_next);
+#ifndef CONFIG_NUMA
+ if (state == MEM_ONLINE)
+ /*
+ * MEM_ONLINE at this point implies early memory. With NUMA,
+ * we'll determine the zone when setting the node id via
+	 * memory_block_add_nid(). Memory hotplug updates the zone
+ * manually when memory onlining/offlining succeeds.
+ */
+ mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
+#endif /* CONFIG_NUMA */
+
+ ret = __add_memory_block(mem);
+ if (ret)
+ return ret;
+
if (group) {
mem->group = group;
list_add(&mem->group_next, &group->memory_blocks);
}
- ret = register_memory(mem);
-
- return ret;
+ return 0;
}
-static int add_memory_block(unsigned long base_section_nr)
+static int __init add_boot_memory_block(unsigned long base_section_nr)
{
int section_count = 0;
unsigned long nr;
@@ -685,11 +775,18 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
- return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0, NULL);
+ return add_memory_block(memory_block_id(base_section_nr),
+ MEM_ONLINE, 0, NULL);
}
-static void unregister_memory(struct memory_block *memory)
+static int add_hotplug_memory_block(unsigned long block_id,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
+{
+ return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group);
+}
+
+static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
return;
@@ -728,8 +825,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
- group);
+ ret = add_hotplug_memory_block(block_id, vmemmap_pages, group);
if (ret)
break;
}
@@ -740,7 +836,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
return ret;
@@ -769,16 +865,10 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
if (WARN_ON_ONCE(!mem))
continue;
unregister_memory_block_under_nodes(mem);
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
-/* return true if the memory block is offlined, otherwise, return false */
-bool is_memblock_offlined(struct memory_block *mem)
-{
- return mem->state == MEM_OFFLINE;
-}
-
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
&dev_attr_probe.attr,
@@ -829,7 +919,7 @@ void __init memory_dev_init(void)
*/
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
- ret = add_memory_block(nr);
+ ret = add_boot_memory_block(nr);
if (ret)
panic("%s() failed to add memory block: %d\n", __func__,
ret);
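The net effect of the memory.c changes is that mem->zone caches the single zone spanned by an online block, replacing the old test_pages_in_a_zone() lookup. A condensed sketch of the invariant, paraphrasing the hunks above:

	/* online: remember the zone the block was onlined to */
	mem->zone = zone;

	/* offline: refuse blocks that span multiple zones/nodes */
	if (!mem->zone)
		return -EINVAL;
	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
	if (!ret)
		mem->zone = NULL;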
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 87acc47e8951..faf3597a96da 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/hugetlb.h>
static struct bus_type node_subsys = {
.name = "node",
@@ -45,7 +46,7 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpumap, 0);
+static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
@@ -66,7 +67,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpulist, 0);
+static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
/**
* struct node_access_nodes - Access class device to hold user visible
@@ -433,6 +434,7 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d ShadowCallStack:%8lu kB\n"
#endif
"Node %d PageTables: %8lu kB\n"
+ "Node %d SecPageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d WritebackTmp: %8lu kB\n"
@@ -459,6 +461,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
+ nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
@@ -587,64 +590,9 @@ static const struct attribute_group *node_dev_groups[] = {
NULL
};
-#ifdef CONFIG_HUGETLBFS
-/*
- * hugetlbfs per node attributes registration interface:
- * When/if hugetlb[fs] subsystem initializes [sometime after this module],
- * it will register its per node attributes for all online nodes with
- * memory. It will also call register_hugetlbfs_with_node(), below, to
- * register its attribute registration functions with this node driver.
- * Once these hooks have been initialized, the node driver will call into
- * the hugetlb module to [un]register attributes for hot-plugged nodes.
- */
-static node_registration_func_t __hugetlb_register_node;
-static node_registration_func_t __hugetlb_unregister_node;
-
-static inline bool hugetlb_register_node(struct node *node)
-{
- if (__hugetlb_register_node &&
- node_state(node->dev.id, N_MEMORY)) {
- __hugetlb_register_node(node);
- return true;
- }
- return false;
-}
-
-static inline void hugetlb_unregister_node(struct node *node)
-{
- if (__hugetlb_unregister_node)
- __hugetlb_unregister_node(node);
-}
-
-void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister)
-{
- __hugetlb_register_node = doregister;
- __hugetlb_unregister_node = unregister;
-}
-#else
-static inline void hugetlb_register_node(struct node *node) {}
-
-static inline void hugetlb_unregister_node(struct node *node) {}
-#endif
-
static void node_device_release(struct device *dev)
{
- struct node *node = to_node(dev);
-
-#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
- /*
- * We schedule the work only when a memory section is
- * onlined/offlined on this node. When we come here,
- * all the memory on this node has been offlined,
- * so we won't enqueue new work to this work.
- *
- * The work is using node->node_work, so we should
- * flush work before freeing the memory.
- */
- flush_work(&node->node_work);
-#endif
- kfree(node);
+ kfree(to_node(dev));
}
/*
@@ -663,13 +611,13 @@ static int register_node(struct node *node, int num)
node->dev.groups = node_dev_groups;
error = device_register(&node->dev);
- if (error)
+ if (error) {
put_device(&node->dev);
- else {
+ } else {
hugetlb_register_node(node);
-
compaction_register_node(node);
}
+
return error;
}
@@ -682,7 +630,8 @@ static int register_node(struct node *node, int num)
*/
void unregister_node(struct node *node)
{
- hugetlb_unregister_node(node); /* no-op, if memoryless node */
+ hugetlb_unregister_node(node);
+ compaction_unregister_node(node);
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
@@ -796,15 +745,12 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk)
+ struct memory_block *mem_blk,
+ enum meminit_context context)
{
int ret;
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node.
- */
- mem_blk->nid = nid;
+ memory_block_add_nid(mem_blk, nid, context);
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
@@ -857,7 +803,7 @@ static int register_mem_block_under_node_early(struct memory_block *mem_blk,
if (page_nid != nid)
continue;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
return 0;
}
/* mem section does not span the specified node */
@@ -873,7 +819,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
return 0;
}
@@ -892,8 +838,9 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
- enum meminit_context context)
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
{
walk_memory_blocks_func_t func;
@@ -906,74 +853,8 @@ void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
(void *)&nid, func);
return;
}
-
-#ifdef CONFIG_HUGETLBFS
-/*
- * Handle per node hstate attribute [un]registration on transistions
- * to/from memoryless state.
- */
-static void node_hugetlb_work(struct work_struct *work)
-{
- struct node *node = container_of(work, struct node, node_work);
-
- /*
- * We only get here when a node transitions to/from memoryless state.
- * We can detect which transition occurred by examining whether the
- * node has memory now. hugetlb_register_node() already check this
- * so we try to register the attributes. If that fails, then the
- * node has transitioned to memoryless, try to unregister the
- * attributes.
- */
- if (!hugetlb_register_node(node))
- hugetlb_unregister_node(node);
-}
-
-static void init_node_hugetlb_work(int nid)
-{
- INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
-}
-
-static int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
-
- switch (action) {
- case MEM_ONLINE:
- case MEM_OFFLINE:
- /*
- * offload per node hstate [un]registration to a work thread
- * when transitioning to/from memoryless state.
- */
- if (nid != NUMA_NO_NODE)
- schedule_work(&node_devices[nid]->node_work);
- break;
-
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_CANCEL_OFFLINE:
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG */
-#if !defined(CONFIG_MEMORY_HOTPLUG) || !defined(CONFIG_HUGETLBFS)
-static inline int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- return NOTIFY_OK;
-}
-
-static void init_node_hugetlb_work(int nid) { }
-
-#endif
-
int __register_one_node(int nid)
{
int error;
@@ -992,8 +873,6 @@ int __register_one_node(int nid)
}
INIT_LIST_HEAD(&node_devices[nid]->access_list);
- /* initialize work queue for memory hot plug */
- init_node_hugetlb_work(nid);
node_init_caches(nid);
return error;
@@ -1064,27 +943,24 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
NULL,
};
-#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
-static int __init register_node_type(void)
+void __init node_dev_init(void)
{
- int ret;
+ int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
- if (!ret) {
- static struct notifier_block node_memory_callback_nb = {
- .notifier_call = node_memory_callback,
- .priority = NODE_CALLBACK_PRI,
- };
- register_hotmemory_notifier(&node_memory_callback_nb);
- }
+ if (ret)
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
- * Note: we're not going to unregister the node class if we fail
- * to register the node state class attribute files.
+ * Create all node devices, which will properly link the node
+ * to applicable memory block devices and already created cpu devices.
*/
- return ret;
+ for_each_online_node(i) {
+ ret = register_one_node(i);
+ if (ret)
+ panic("%s() failed to add node: %d\n", __func__, ret);
+ }
}
-postcore_initcall(register_node_type);
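Ordering-wise, node_dev_init() replaces the old postcore_initcall() and is now driven directly from driver_init() (see the init.c hunk above), so node devices exist before any later initcall runs. Condensed:

	/* drivers/base/init.c, abridged from the hunk above */
	memory_dev_init();	/* memory block devices first */
	node_dev_init();	/* then node devices, linking blocks and cpus */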
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
new file mode 100644
index 000000000000..87af641cfe1a
--- /dev/null
+++ b/drivers/base/physical_location.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/sysfs.h>
+
+#include "physical_location.h"
+
+bool dev_add_physical_location(struct device *dev)
+{
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ if (!has_acpi_companion(dev))
+ return false;
+
+ status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ dev->physical_location =
+ kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
+ if (!dev->physical_location)
+ return false;
+ dev->physical_location->panel = pld->panel;
+ dev->physical_location->vertical_position = pld->vertical_position;
+ dev->physical_location->horizontal_position = pld->horizontal_position;
+ dev->physical_location->dock = pld->dock;
+ dev->physical_location->lid = pld->lid;
+
+ ACPI_FREE(pld);
+ return true;
+}
+
+static ssize_t panel_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *panel;
+
+ switch (dev->physical_location->panel) {
+ case DEVICE_PANEL_TOP:
+ panel = "top";
+ break;
+ case DEVICE_PANEL_BOTTOM:
+ panel = "bottom";
+ break;
+ case DEVICE_PANEL_LEFT:
+ panel = "left";
+ break;
+ case DEVICE_PANEL_RIGHT:
+ panel = "right";
+ break;
+ case DEVICE_PANEL_FRONT:
+ panel = "front";
+ break;
+ case DEVICE_PANEL_BACK:
+ panel = "back";
+ break;
+ default:
+ panel = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", panel);
+}
+static DEVICE_ATTR_RO(panel);
+
+static ssize_t vertical_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *vertical_position;
+
+ switch (dev->physical_location->vertical_position) {
+ case DEVICE_VERT_POS_UPPER:
+ vertical_position = "upper";
+ break;
+ case DEVICE_VERT_POS_CENTER:
+ vertical_position = "center";
+ break;
+ case DEVICE_VERT_POS_LOWER:
+ vertical_position = "lower";
+ break;
+ default:
+ vertical_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", vertical_position);
+}
+static DEVICE_ATTR_RO(vertical_position);
+
+static ssize_t horizontal_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *horizontal_position;
+
+ switch (dev->physical_location->horizontal_position) {
+ case DEVICE_HORI_POS_LEFT:
+ horizontal_position = "left";
+ break;
+ case DEVICE_HORI_POS_CENTER:
+ horizontal_position = "center";
+ break;
+ case DEVICE_HORI_POS_RIGHT:
+ horizontal_position = "right";
+ break;
+ default:
+ horizontal_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", horizontal_position);
+}
+static DEVICE_ATTR_RO(horizontal_position);
+
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->dock ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(dock);
+
+static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->lid ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(lid);
+
+static struct attribute *dev_attr_physical_location[] = {
+ &dev_attr_panel.attr,
+ &dev_attr_vertical_position.attr,
+ &dev_attr_horizontal_position.attr,
+ &dev_attr_dock.attr,
+ &dev_attr_lid.attr,
+ NULL,
+};
+
+const struct attribute_group dev_attr_physical_location_group = {
+ .name = "physical_location",
+ .attrs = dev_attr_physical_location,
+};
+
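The actual call site for dev_add_physical_location() lives in the device core and is not part of this excerpt; a hedged sketch of the intended use, with device_add_group() standing in for however the group is really attached:

	/* Sketch only: the real wiring is in drivers/base/core.c. */
	if (dev_add_physical_location(dev)) {
		if (device_add_group(dev, &dev_attr_physical_location_group))
			dev_warn(dev, "failed to add physical_location group\n");
	}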
diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h
new file mode 100644
index 000000000000..82cde9f1b161
--- /dev/null
+++ b/drivers/base/physical_location.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/device.h>
+
+#ifdef CONFIG_ACPI
+extern bool dev_add_physical_location(struct device *dev);
+extern const struct attribute_group dev_attr_physical_location_group;
+#else
+static inline bool dev_add_physical_location(struct device *dev) { return false; }
+static const struct attribute_group dev_attr_physical_location_group = {};
+#endif
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 3d6c8f9caf43..12b044151298 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -23,7 +23,6 @@
struct platform_msi_priv_data {
struct device *dev;
void *host_data;
- const struct attribute_group **msi_irq_groups;
msi_alloc_info_t arg;
irq_write_msi_msg_t write_msg;
int devid;
@@ -39,11 +38,9 @@ static DEFINE_IDA(platform_msi_devid_ida);
*/
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
- u32 devid;
+ u32 devid = desc->dev->msi.data->platform_data->devid;
- devid = desc->platform.msi_priv_data->devid;
-
- return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+ return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
}
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
@@ -86,11 +83,8 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info)
static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
- struct platform_msi_priv_data *priv_data;
-
- priv_data = desc->platform.msi_priv_data;
- priv_data->write_msg(desc, msg);
+ desc->dev->msi.data->platform_data->write_msg(desc, msg);
}
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
@@ -113,62 +107,6 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info)
info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}
-static void platform_msi_free_descs(struct device *dev, int base, int nvec)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- if (desc->platform.msi_index >= base &&
- desc->platform.msi_index < (base + nvec)) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
- }
-}
-
-static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
- int nvec,
- struct platform_msi_priv_data *data)
-
-{
- struct msi_desc *desc;
- int i, base = 0;
-
- if (!list_empty(dev_to_msi_list(dev))) {
- desc = list_last_entry(dev_to_msi_list(dev),
- struct msi_desc, list);
- base = desc->platform.msi_index + 1;
- }
-
- for (i = 0; i < nvec; i++) {
- desc = alloc_msi_entry(dev, 1, NULL);
- if (!desc)
- break;
-
- desc->platform.msi_priv_data = data;
- desc->platform.msi_index = base + i;
- desc->irq = virq ? virq + i : 0;
-
- list_add_tail(&desc->list, dev_to_msi_list(dev));
- }
-
- if (i != nvec) {
- /* Clean up the mess */
- platform_msi_free_descs(dev, base, nvec);
-
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int platform_msi_alloc_descs(struct device *dev, int nvec,
- struct platform_msi_priv_data *data)
-
-{
- return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
-}
-
/**
* platform_msi_create_irq_domain - Create a platform MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
@@ -191,6 +129,8 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
platform_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
platform_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
+ MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -198,50 +138,59 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
+EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
-static struct platform_msi_priv_data *
-platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
struct platform_msi_priv_data *datap;
+ int err;
+
/*
* Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
*/
- if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
- return ERR_PTR(-EINVAL);
+ if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
+ return -EINVAL;
- if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+ if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
dev_err(dev, "Incompatible msi_domain, giving up\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- /* Already had a helping of MSI? Greed... */
- if (!list_empty(dev_to_msi_list(dev)))
- return ERR_PTR(-EBUSY);
+ err = msi_setup_device_data(dev);
+ if (err)
+ return err;
+
+ /* Already initialized? */
+ if (dev->msi.data->platform_data)
+ return -EBUSY;
datap = kzalloc(sizeof(*datap), GFP_KERNEL);
if (!datap)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
datap->devid = ida_simple_get(&platform_msi_devid_ida,
0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
if (datap->devid < 0) {
- int err = datap->devid;
+ err = datap->devid;
kfree(datap);
- return ERR_PTR(err);
+ return err;
}
datap->write_msg = write_msi_msg;
datap->dev = dev;
-
- return datap;
+ dev->msi.data->platform_data = datap;
+ return 0;
}
-static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
+static void platform_msi_free_priv_data(struct device *dev)
{
+ struct platform_msi_priv_data *data = dev->msi.data->platform_data;
+
+ dev->msi.data->platform_data = NULL;
ida_simple_remove(&platform_msi_devid_ida, data->devid);
kfree(data);
}
@@ -258,35 +207,15 @@ static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
{
- struct platform_msi_priv_data *priv_data;
int err;
- priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (IS_ERR(priv_data))
- return PTR_ERR(priv_data);
-
- err = platform_msi_alloc_descs(dev, nvec, priv_data);
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
if (err)
- goto out_free_priv_data;
+ return err;
- err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+ err = msi_domain_alloc_irqs(dev->msi.domain, dev, nvec);
if (err)
- goto out_free_desc;
-
- priv_data->msi_irq_groups = msi_populate_sysfs(dev);
- if (IS_ERR(priv_data->msi_irq_groups)) {
- err = PTR_ERR(priv_data->msi_irq_groups);
- goto out_free_irqs;
- }
-
- return 0;
-
-out_free_irqs:
- msi_domain_free_irqs(dev->msi_domain, dev);
-out_free_desc:
- platform_msi_free_descs(dev, 0, nvec);
-out_free_priv_data:
- platform_msi_free_priv_data(priv_data);
+ platform_msi_free_priv_data(dev);
return err;
}
@@ -298,16 +227,8 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
*/
void platform_msi_domain_free_irqs(struct device *dev)
{
- if (!list_empty(dev_to_msi_list(dev))) {
- struct msi_desc *desc;
-
- desc = first_msi_entry(dev);
- msi_destroy_sysfs(dev, desc->platform.msi_priv_data->msi_irq_groups);
- platform_msi_free_priv_data(desc->platform.msi_priv_data);
- }
-
- msi_domain_free_irqs(dev->msi_domain, dev);
- platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
+ msi_domain_free_irqs(dev->msi.domain, dev);
+ platform_msi_free_priv_data(dev);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
@@ -316,17 +237,20 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
* a platform-msi domain
* @domain: The platform-msi domain
*
- * Returns the private data provided when calling
- * platform_msi_create_device_domain.
+ * Return: The private data provided when calling
+ * platform_msi_create_device_domain().
*/
void *platform_msi_get_host_data(struct irq_domain *domain)
{
struct platform_msi_priv_data *data = domain->host_data;
+
return data->host_data;
}
+static struct lock_class_key platform_device_msi_lock_class;
+
/**
- * __platform_msi_create_device_domain - Create a platform-msi domain
+ * __platform_msi_create_device_domain - Create a platform-msi device domain
*
* @dev: The device generating the MSIs
* @nvec: The number of MSIs that need to be allocated
@@ -335,7 +259,11 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
* @ops: The hierarchy domain operations to use
* @host_data: Private data associated to this domain
*
- * Returns an irqdomain for @nvec interrupts
+ * Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
+ *
+ * This is for interrupt domains which stack on a platform-msi domain
+ * created by platform_msi_create_irq_domain(). @dev->msi.domain points to
+ * that platform-msi domain which is the parent for the new domain.
*/
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
@@ -349,12 +277,20 @@ __platform_msi_create_device_domain(struct device *dev,
struct irq_domain *domain;
int err;
- data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (IS_ERR(data))
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ if (err)
return NULL;
+ /*
+ * Use a separate lock class for the MSI descriptor mutex on
+ * platform MSI device domains because the descriptor mutex nests
+ * into the domain mutex. See alloc/free below.
+ */
+ lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
+
+ data = dev->msi.data->platform_data;
data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
+ domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
@@ -370,61 +306,46 @@ __platform_msi_create_device_domain(struct device *dev,
free_domain:
irq_domain_remove(domain);
free_priv:
- platform_msi_free_priv_data(data);
+ platform_msi_free_priv_data(dev);
return NULL;
}
/**
- * platform_msi_domain_free - Free interrupts associated with a platform-msi
- * domain
+ * platform_msi_device_domain_free - Free interrupts associated with a platform-msi
+ * device domain
*
- * @domain: The platform-msi domain
+ * @domain: The platform-msi device domain
* @virq: The base irq from which to perform the free operation
- * @nvec: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to free from @virq
*/
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec)
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
- struct msi_desc *desc, *tmp;
- for_each_msi_entry_safe(desc, tmp, data->dev) {
- if (WARN_ON(!desc->irq || desc->nvec_used != 1))
- return;
- if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
- continue;
-
- irq_domain_free_irqs_common(domain, desc->irq, 1);
- list_del(&desc->list);
- free_msi_entry(desc);
- }
+
+ msi_lock_descs(data->dev);
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ msi_free_msi_descs_range(data->dev, MSI_DESC_ALL, virq, virq + nr_irqs - 1);
+ msi_unlock_descs(data->dev);
}
/**
- * platform_msi_domain_alloc - Allocate interrupts associated with
- * a platform-msi domain
+ * platform_msi_device_domain_alloc - Allocate interrupts associated with
+ * a platform-msi device domain
*
- * @domain: The platform-msi domain
+ * @domain: The platform-msi device domain
* @virq: The base irq from which to perform the allocate operation
- * @nr_irqs: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to allocate from @virq
*
* Return 0 on success, or an error code on failure. Must be called
* with irq_domain_mutex held (which can only be done as part of a
* top-level interrupt allocation).
*/
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
- int err;
-
- err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
- if (err)
- return err;
-
- err = msi_domain_populate_irqs(domain->parent, data->dev,
- virq, nr_irqs, &data->arg);
- if (err)
- platform_msi_domain_free(domain, virq, nr_irqs);
+ struct device *dev = data->dev;
- return err;
+ return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
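
The upshot of the rework above: per-device platform MSI state now lives in dev->msi.data->platform_data rather than in per-descriptor lists. A minimal consumer sketch, with the foo names and vector count hypothetical (the two entry points are the ones modified in this patch; assumes <linux/msi.h>):

static void foo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Program the device doorbell from msg->address_hi,
	 * msg->address_lo and msg->data; hardware specific, omitted. */
}

static int foo_setup_msis(struct device *dev)
{
	/* Sets up dev->msi.data->platform_data and the MSI descriptors */
	int ret = platform_msi_domain_alloc_irqs(dev, 4, foo_write_msi_msg);

	if (ret)
		return ret;

	/* ... resolve the Linux IRQ numbers and request_irq() them ... */
	return 0;
}

/* Teardown mirrors it: platform_msi_domain_free_irqs(dev); */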
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 598acf93a360..51bb2289865c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -30,6 +30,8 @@
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
#include "base.h"
#include "power/power.h"
@@ -231,7 +233,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
out_not_found:
ret = -ENXIO;
out:
- WARN(ret == 0, "0 is an invalid IRQ number\n");
+ if (WARN(!ret, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
@@ -258,8 +261,9 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
int ret;
ret = platform_get_irq_optional(dev, num);
- if (ret < 0 && ret != -EPROBE_DEFER)
- dev_err(&dev->dev, "IRQ index %u not found\n", num);
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret,
+ "IRQ index %u not found\n", num);
return ret;
}
@@ -445,7 +449,8 @@ static int __platform_get_irq_byname(struct platform_device *dev,
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
if (r) {
- WARN(r->start == 0, "0 is an invalid IRQ number\n");
+ if (WARN(!r->start, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
return r->start;
}
@@ -466,9 +471,9 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
int ret;
ret = __platform_get_irq_byname(dev, name);
- if (ret < 0 && ret != -EPROBE_DEFER)
- dev_err(&dev->dev, "IRQ %s not found\n", name);
-
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
+ name);
return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
@@ -762,6 +767,10 @@ EXPORT_SYMBOL_GPL(platform_device_del);
/**
* platform_device_register - add a platform-level device
* @pdev: platform device we're adding
+ *
+ * NOTE: _Never_ directly free @pdev after calling this function, even if it
+ * returned an error! Always use platform_device_put() to give up the
+ * reference initialised in this function instead.
*/
int platform_device_register(struct platform_device *pdev)
{
@@ -1270,31 +1279,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = pdev->driver_override;
- if (strlen(driver_override)) {
- pdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- pdev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -1449,9 +1438,9 @@ static void platform_shutdown(struct device *_dev)
drv->shutdown(dev);
}
-
-int platform_dma_configure(struct device *dev)
+static int platform_dma_configure(struct device *dev)
{
+ struct platform_driver *drv = to_platform_driver(dev->driver);
enum dev_dma_attr attr;
int ret = 0;
@@ -1462,9 +1451,23 @@ int platform_dma_configure(struct device *dev)
ret = acpi_dma_configure(dev, attr);
}
+ if (!ret && !drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
return ret;
}
+static void platform_dma_cleanup(struct device *dev)
+{
+ struct platform_driver *drv = to_platform_driver(dev->driver);
+
+ if (!drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
static const struct dev_pm_ops platform_dev_pm_ops = {
SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
USE_PLATFORM_PM_SLEEP_OPS
@@ -1479,6 +1482,7 @@ struct bus_type platform_bus_type = {
.remove = platform_remove,
.shutdown = platform_shutdown,
.dma_configure = platform_dma_configure,
+ .dma_cleanup = platform_dma_cleanup,
.pm = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
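
Taken together, the platform IRQ helpers above now guarantee a strictly negative return on failure (a zero IRQ is remapped to -EINVAL) and log through dev_err_probe(), which is silent for -EPROBE_DEFER. A hedged probe sketch; foo_probe() and foo_handler() are hypothetical and assume <linux/interrupt.h>:

static irqreturn_t foo_handler(int irq, void *data);	/* hypothetical */

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* already logged; can no longer be 0 */

	return devm_request_irq(&pdev->dev, irq, foo_handler, 0,
				dev_name(&pdev->dev), pdev);
}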
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index bbddb267c2e6..72115917e0bd 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -172,10 +172,10 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
*
- * This functions will reverse the actions from dev_pm_domain_attach() and
- * dev_pm_domain_attach_by_id(), thus it detaches @dev from its PM domain.
- * Typically it should be invoked during the remove phase, either from
- * subsystem level code or from drivers.
+ * This function will reverse the actions from dev_pm_domain_attach(),
+ * dev_pm_domain_attach_by_id() and dev_pm_domain_attach_by_name(), thus
+ * detaching @dev from its PM domain. Typically it should be invoked during the
+ * remove phase, either from subsystem level code or from drivers.
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
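
A sketch of the attach/detach pairing the corrected kernel-doc describes; the "perf" domain name and the error handling are illustrative only:

	struct device *pd_dev;

	pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
	if (IS_ERR(pd_dev))
		return PTR_ERR(pd_dev);
	if (!pd_dev)
		return -ENODEV;	/* no such domain described for this device */

	/* ... later, typically from the driver's .remove() path: */
	dev_pm_domain_detach(pd_dev, true);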
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5db704f02e71..6471b559230e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -131,7 +131,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
-static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
{
bool ret;
@@ -139,11 +139,14 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
/*
- * Warn once if an IRQ safe device is attached to a no sleep domain, as
- * to indicate a suboptimal configuration for PM. For an always on
- * domain this isn't case, thus don't warn.
+ * Warn once if an IRQ safe device is attached to a domain whose
+ * callbacks are allowed to sleep. This indicates a suboptimal
+ * configuration for PM, but it doesn't matter for an always on domain.
*/
- if (ret && !genpd_is_always_on(genpd))
+ if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
+ return ret;
+
+ if (ret)
dev_warn_once(dev, "PM domain %s will not be powered off\n",
genpd->name);
@@ -219,30 +222,32 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
struct dentry *d;
+ if (!genpd_debugfs_dir)
+ return;
+
d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
debugfs_remove(d);
}
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
- ktime_t delta, now;
+ u64 delta, now;
+
+ now = ktime_get_mono_fast_ns();
+ if (now <= genpd->accounting_time)
+ return;
- now = ktime_get();
- delta = ktime_sub(now, genpd->accounting_time);
+ delta = now - genpd->accounting_time;
/*
* If genpd->status is active, it means we are just
* out of off and so update the idle time and vice
* versa.
*/
- if (genpd->status == GENPD_STATE_ON) {
- int state_idx = genpd->state_idx;
-
- genpd->states[state_idx].idle_time =
- ktime_add(genpd->states[state_idx].idle_time, delta);
- } else {
- genpd->on_time = ktime_add(genpd->on_time, delta);
- }
+ if (genpd->status == GENPD_STATE_ON)
+ genpd->states[genpd->state_idx].idle_time += delta;
+ else
+ genpd->on_time += delta;
genpd->accounting_time = now;
}
@@ -476,15 +481,16 @@ EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
*/
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
- struct generic_pm_domain_data *gpd_data;
struct generic_pm_domain *genpd;
+ struct gpd_timing_data *td;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return;
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- gpd_data->next_wakeup = next;
+ td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
+ if (td)
+ td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
@@ -506,6 +512,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
if (!genpd->power_on)
goto out;
+ timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
if (!timed) {
ret = genpd->power_on(genpd);
if (ret)
@@ -524,7 +531,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
goto out;
genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
- genpd->max_off_time_changed = true;
+ genpd->gd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "on", elapsed_ns);
@@ -555,6 +562,7 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
if (!genpd->power_off)
goto out;
+ timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
if (!timed) {
ret = genpd->power_off(genpd);
if (ret)
@@ -573,7 +581,7 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
goto out;
genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
- genpd->max_off_time_changed = true;
+ genpd->gd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
@@ -636,19 +644,25 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- enum pm_qos_flags_status stat;
-
- stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
- if (stat > PM_QOS_FLAGS_NONE)
+ /*
+ * The children must be in their deepest (powered-off) states to allow
+ * the parent to be powered off. Note that there's no need for
+ * additional locking, as powering on a child requires the parent's
+ * lock to be acquired first.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
return -EBUSY;
+ }
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
/*
* Do not allow PM domain to be powered off, when an IRQ safe
* device is part of a non-IRQ safe domain.
*/
if (!pm_runtime_suspended(pdd->dev) ||
- irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+ irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
not_suspended++;
}
@@ -763,25 +777,27 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
dev = gpd_data->base.dev;
for (;;) {
- struct generic_pm_domain *genpd;
+ struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
struct pm_domain_data *pdd;
+ struct gpd_timing_data *td;
spin_lock_irq(&dev->power.lock);
pdd = dev->power.subsys_data ?
dev->power.subsys_data->domain_data : NULL;
if (pdd) {
- to_gpd_data(pdd)->td.constraint_changed = true;
- genpd = dev_to_genpd(dev);
- } else {
- genpd = ERR_PTR(-ENODATA);
+ td = to_gpd_data(pdd)->td;
+ if (td) {
+ td->constraint_changed = true;
+ genpd = dev_to_genpd(dev);
+ }
}
spin_unlock_irq(&dev->power.lock);
if (!IS_ERR(genpd)) {
genpd_lock(genpd);
- genpd->max_off_time_changed = true;
+ genpd->gd->max_off_time_changed = true;
genpd_unlock(genpd);
}
@@ -867,9 +883,9 @@ static int genpd_runtime_suspend(struct device *dev)
struct generic_pm_domain *genpd;
bool (*suspend_ok)(struct device *__dev);
struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
- struct gpd_timing_data *td = &gpd_data->td;
+ struct gpd_timing_data *td = gpd_data->td;
bool runtime_pm = pm_runtime_enabled(dev);
- ktime_t time_start;
+ ktime_t time_start = 0;
s64 elapsed_ns;
int ret;
@@ -890,8 +906,7 @@ static int genpd_runtime_suspend(struct device *dev)
return -EBUSY;
/* Measure suspend latency. */
- time_start = 0;
- if (runtime_pm)
+ if (td && runtime_pm)
time_start = ktime_get();
ret = __genpd_runtime_suspend(dev);
@@ -905,13 +920,13 @@ static int genpd_runtime_suspend(struct device *dev)
}
/* Update suspend latency value if the measured time exceeds it. */
- if (runtime_pm) {
+ if (td && runtime_pm) {
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > td->suspend_latency_ns) {
td->suspend_latency_ns = elapsed_ns;
dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
elapsed_ns);
- genpd->max_off_time_changed = true;
+ genpd->gd->max_off_time_changed = true;
td->constraint_changed = true;
}
}
@@ -920,7 +935,7 @@ static int genpd_runtime_suspend(struct device *dev)
* If power.irq_safe is set, this routine may be run with
* IRQs disabled, so suspend only if the PM domain also is irq_safe.
*/
- if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
+ if (irq_safe_dev_in_sleep_domain(dev, genpd))
return 0;
genpd_lock(genpd);
@@ -943,12 +958,11 @@ static int genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
- struct gpd_timing_data *td = &gpd_data->td;
- bool runtime_pm = pm_runtime_enabled(dev);
- ktime_t time_start;
+ struct gpd_timing_data *td = gpd_data->td;
+ bool timed = td && pm_runtime_enabled(dev);
+ ktime_t time_start = 0;
s64 elapsed_ns;
int ret;
- bool timed = true;
dev_dbg(dev, "%s()\n", __func__);
@@ -960,10 +974,8 @@ static int genpd_runtime_resume(struct device *dev)
* As we don't power off a non IRQ safe domain, which holds
* an IRQ safe device, we don't need to restore power to it.
*/
- if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
- timed = false;
+ if (irq_safe_dev_in_sleep_domain(dev, genpd))
goto out;
- }
genpd_lock(genpd);
ret = genpd_power_on(genpd, 0);
@@ -976,8 +988,7 @@ static int genpd_runtime_resume(struct device *dev)
out:
/* Measure resume latency. */
- time_start = 0;
- if (timed && runtime_pm)
+ if (timed)
time_start = ktime_get();
ret = genpd_start_dev(genpd, dev);
@@ -989,13 +1000,13 @@ static int genpd_runtime_resume(struct device *dev)
goto err_stop;
/* Update resume latency value if the measured time exceeds it. */
- if (timed && runtime_pm) {
+ if (timed) {
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > td->resume_latency_ns) {
td->resume_latency_ns = elapsed_ns;
dev_dbg(dev, "resume latency exceeded, %lld ns\n",
elapsed_ns);
- genpd->max_off_time_changed = true;
+ genpd->gd->max_off_time_changed = true;
td->constraint_changed = true;
}
}
@@ -1073,6 +1084,13 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
|| atomic_read(&genpd->sd_count) > 0)
return;
+ /* Check that the children are in their deepest (powered-off) state. */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return;
+ }
+
/* Choose the deepest state when suspending */
genpd->state_idx = genpd->state_count - 1;
if (_genpd_power_off(genpd, false))
@@ -1481,9 +1499,11 @@ EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
#endif /* CONFIG_PM_SLEEP */
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
+ bool has_governor)
{
struct generic_pm_domain_data *gpd_data;
+ struct gpd_timing_data *td;
int ret;
ret = dev_pm_get_subsys_data(dev);
@@ -1497,26 +1517,38 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
}
gpd_data->base.dev = dev;
- gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
- gpd_data->next_wakeup = KTIME_MAX;
- spin_lock_irq(&dev->power.lock);
+ /* Allocate data used by a governor. */
+ if (has_governor) {
+ td = kzalloc(sizeof(*td), GFP_KERNEL);
+ if (!td) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
- if (dev->power.subsys_data->domain_data) {
- ret = -EINVAL;
- goto err_free;
+ td->constraint_changed = true;
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ td->next_wakeup = KTIME_MAX;
+ gpd_data->td = td;
}
- dev->power.subsys_data->domain_data = &gpd_data->base;
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data->domain_data)
+ ret = -EINVAL;
+ else
+ dev->power.subsys_data->domain_data = &gpd_data->base;
spin_unlock_irq(&dev->power.lock);
+ if (ret)
+ goto err_free;
+
return gpd_data;
err_free:
- spin_unlock_irq(&dev->power.lock);
+ kfree(gpd_data->td);
kfree(gpd_data);
err_put:
dev_pm_put_subsys_data(dev);
@@ -1532,6 +1564,7 @@ static void genpd_free_dev_data(struct device *dev,
spin_unlock_irq(&dev->power.lock);
+ kfree(gpd_data->td);
kfree(gpd_data);
dev_pm_put_subsys_data(dev);
}
@@ -1588,6 +1621,7 @@ static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
struct device *base_dev)
{
+ struct genpd_governor_data *gd = genpd->gd;
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1596,7 +1630,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = genpd_alloc_dev_data(dev);
+ gpd_data = genpd_alloc_dev_data(dev, gd);
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
@@ -1612,7 +1646,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
- genpd->max_off_time_changed = true;
+ if (gd)
+ gd->max_off_time_changed = true;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
@@ -1666,7 +1701,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
}
genpd->device_count--;
- genpd->max_off_time_changed = true;
+ if (genpd->gd)
+ genpd->gd->max_off_time_changed = true;
genpd_clear_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, NULL);
@@ -1939,6 +1975,53 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
return 0;
}
+static int genpd_alloc_data(struct generic_pm_domain *genpd)
+{
+ struct genpd_governor_data *gd = NULL;
+ int ret;
+
+ if (genpd_is_cpu_domain(genpd) &&
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (genpd->gov) {
+ gd = kzalloc(sizeof(*gd), GFP_KERNEL);
+ if (!gd) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ gd->max_off_time_ns = -1;
+ gd->max_off_time_changed = true;
+ gd->next_wakeup = KTIME_MAX;
+ }
+
+ /* Use only one "off" state if there were no states declared */
+ if (genpd->state_count == 0) {
+ ret = genpd_set_default_power_state(genpd);
+ if (ret)
+ goto free;
+ }
+
+ genpd->gd = gd;
+ return 0;
+
+free:
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ kfree(gd);
+ return ret;
+}
+
+static void genpd_free_data(struct generic_pm_domain *genpd)
+{
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ if (genpd->free_states)
+ genpd->free_states(genpd->states, genpd->state_count);
+ kfree(genpd->gd);
+}
+
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
@@ -1976,11 +2059,9 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
genpd->device_count = 0;
- genpd->max_off_time_ns = -1;
- genpd->max_off_time_changed = true;
genpd->provider = NULL;
genpd->has_provider = false;
- genpd->accounting_time = ktime_get();
+ genpd->accounting_time = ktime_get_mono_fast_ns();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = genpd_prepare;
@@ -1998,26 +2079,24 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->dev_ops.start = pm_clk_resume;
}
+ /* The always-on governor works better with the corresponding flag. */
+ if (gov == &pm_domain_always_on_gov)
+ genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
+
/* Always-on domains must be powered on at initialization. */
if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
- !genpd_status_on(genpd))
+ !genpd_status_on(genpd)) {
+ pr_err("always-on PM domain %s is not on\n", genpd->name);
return -EINVAL;
+ }
- if (genpd_is_cpu_domain(genpd) &&
- !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
- return -ENOMEM;
-
- /* Use only one "off" state if there were no states declared */
- if (genpd->state_count == 0) {
- ret = genpd_set_default_power_state(genpd);
- if (ret) {
- if (genpd_is_cpu_domain(genpd))
- free_cpumask_var(genpd->cpus);
- return ret;
- }
- } else if (!gov && genpd->state_count > 1) {
+ /* Multiple states but no governor doesn't make sense. */
+ if (!gov && genpd->state_count > 1)
pr_warn("%s: no governor for states\n", genpd->name);
- }
+
+ ret = genpd_alloc_data(genpd);
+ if (ret)
+ return ret;
device_initialize(&genpd->dev);
dev_set_name(&genpd->dev, "%s", genpd->name);
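
A hedged sketch of what the new always-on check rejects; my_genpd and its callbacks are illustrative:

static struct generic_pm_domain my_genpd = {
	.name = "my_pd",
	.flags = GENPD_FLAG_ALWAYS_ON,
	/* .power_on / .power_off as the platform requires */
};

/* is_off must be false: an always-on domain has to start powered on,
 * otherwise pm_genpd_init() now logs "always-on PM domain my_pd is
 * not on" and returns -EINVAL. Passing pm_domain_always_on_gov also
 * sets GENPD_FLAG_RPM_ALWAYS_ON implicitly, per the hunk above. */
int ret = pm_genpd_init(&my_genpd, &pm_domain_always_on_gov, false);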
@@ -2058,14 +2137,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
kfree(link);
}
- genpd_debug_remove(genpd);
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
+ genpd_debug_remove(genpd);
cancel_work_sync(&genpd->power_off_work);
- if (genpd_is_cpu_domain(genpd))
- free_cpumask_var(genpd->cpus);
- if (genpd->free_states)
- genpd->free_states(genpd->states, genpd->state_count);
+ genpd_free_data(genpd);
pr_debug("%s: removed %s\n", __func__, genpd->name);
@@ -2248,12 +2324,8 @@ int of_genpd_add_provider_simple(struct device_node *np,
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
/*
* Save table for faster processing while setting performance
@@ -2312,9 +2384,8 @@ int of_genpd_add_provider_onecell(struct device_node *np,
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
- i, ret);
+ dev_err_probe(&genpd->dev, ret,
+ "Failed to add OPP table for index %d\n", i);
goto error;
}
@@ -2672,12 +2743,8 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to add to PM domain %s: %d",
- pd->name, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
@@ -2885,6 +2952,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
np = it.node;
if (!of_match_node(idle_state_match, np))
continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
if (states) {
ret = genpd_parse_state(&states[i], np);
if (ret) {
@@ -3153,6 +3224,7 @@ static int sub_domains_show(struct seq_file *s, void *data)
static int idle_states_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
+ u64 now, delta, idle_time = 0;
unsigned int i;
int ret = 0;
@@ -3163,17 +3235,19 @@ static int idle_states_show(struct seq_file *s, void *data)
seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
for (i = 0; i < genpd->state_count; i++) {
- ktime_t delta = 0;
- s64 msecs;
+ idle_time += genpd->states[i].idle_time;
- if ((genpd->status == GENPD_STATE_OFF) &&
- (genpd->state_idx == i))
- delta = ktime_sub(ktime_get(), genpd->accounting_time);
+ if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
+ now = ktime_get_mono_fast_ns();
+ if (now > genpd->accounting_time) {
+ delta = now - genpd->accounting_time;
+ idle_time += delta;
+ }
+ }
- msecs = ktime_to_ms(
- ktime_add(genpd->states[i].idle_time, delta));
- seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
- genpd->states[i].usage, genpd->states[i].rejected);
+ do_div(idle_time, NSEC_PER_MSEC);
+ seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
+ genpd->states[i].usage, genpd->states[i].rejected);
}
genpd_unlock(genpd);
@@ -3183,18 +3257,22 @@ static int idle_states_show(struct seq_file *s, void *data)
static int active_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
- ktime_t delta = 0;
+ u64 now, on_time, delta = 0;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
- if (genpd->status == GENPD_STATE_ON)
- delta = ktime_sub(ktime_get(), genpd->accounting_time);
+ if (genpd->status == GENPD_STATE_ON) {
+ now = ktime_get_mono_fast_ns();
+ if (now > genpd->accounting_time)
+ delta = now - genpd->accounting_time;
+ }
- seq_printf(s, "%lld ms\n", ktime_to_ms(
- ktime_add(genpd->on_time, delta)));
+ on_time = genpd->on_time + delta;
+ do_div(on_time, NSEC_PER_MSEC);
+ seq_printf(s, "%llu ms\n", on_time);
genpd_unlock(genpd);
return ret;
@@ -3203,7 +3281,7 @@ static int active_time_show(struct seq_file *s, void *data)
static int total_idle_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
- ktime_t delta = 0, total = 0;
+ u64 now, delta, total = 0;
unsigned int i;
int ret = 0;
@@ -3212,16 +3290,19 @@ static int total_idle_time_show(struct seq_file *s, void *data)
return -ERESTARTSYS;
for (i = 0; i < genpd->state_count; i++) {
+ total += genpd->states[i].idle_time;
- if ((genpd->status == GENPD_STATE_OFF) &&
- (genpd->state_idx == i))
- delta = ktime_sub(ktime_get(), genpd->accounting_time);
-
- total = ktime_add(total, genpd->states[i].idle_time);
+ if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
+ now = ktime_get_mono_fast_ns();
+ if (now > genpd->accounting_time) {
+ delta = now - genpd->accounting_time;
+ total += delta;
+ }
+ }
}
- total = ktime_add(total, delta);
- seq_printf(s, "%lld ms\n", ktime_to_ms(total));
+ do_div(total, NSEC_PER_MSEC);
+ seq_printf(s, "%llu ms\n", total);
genpd_unlock(genpd);
return ret;
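
The accounting rework stores timestamps as plain u64 nanoseconds from ktime_get_mono_fast_ns(), guards against the fast clock being observed slightly behind a previous reading (the now > accounting_time checks), and converts to milliseconds with do_div(), since a plain '/' on a u64 does not build on 32-bit targets. A minimal illustration of the conversion pattern:

	u64 ms = genpd->on_time;	/* accumulated nanoseconds */

	do_div(ms, NSEC_PER_MSEC);	/* divides in place; the macro
					 * returns the remainder if needed */
	seq_printf(s, "%llu ms\n", ms);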
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index cd08c5885190..282a3a135827 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -18,6 +18,8 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
s64 constraint_ns;
if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+ struct gpd_timing_data *td = dev_gpd_data(dev)->td;
+
/*
* Only take suspend-time QoS constraints of devices into
* account, because constraints updated after the device has
@@ -25,7 +27,8 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
* anyway. In order for them to take effect, the device has to
* be resumed and suspended again.
*/
- constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+ constraint_ns = td ? td->effective_constraint_ns :
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
} else {
/*
* The child is not in a domain and there's no info on its
@@ -49,7 +52,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
*/
static bool default_suspend_ok(struct device *dev)
{
- struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ struct gpd_timing_data *td = dev_gpd_data(dev)->td;
unsigned long flags;
s64 constraint_ns;
@@ -136,26 +139,28 @@ static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t n
* is able to enter its optimal idle state.
*/
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- next_wakeup = to_gpd_data(pdd)->next_wakeup;
+ next_wakeup = to_gpd_data(pdd)->td->next_wakeup;
if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
if (ktime_before(next_wakeup, domain_wakeup))
domain_wakeup = next_wakeup;
}
list_for_each_entry(link, &genpd->parent_links, parent_node) {
- next_wakeup = link->child->next_wakeup;
+ struct genpd_governor_data *cgd = link->child->gd;
+
+ next_wakeup = cgd ? cgd->next_wakeup : KTIME_MAX;
if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
if (ktime_before(next_wakeup, domain_wakeup))
domain_wakeup = next_wakeup;
}
- genpd->next_wakeup = domain_wakeup;
+ genpd->gd->next_wakeup = domain_wakeup;
}
static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
unsigned int state, ktime_t now)
{
- ktime_t domain_wakeup = genpd->next_wakeup;
+ ktime_t domain_wakeup = genpd->gd->next_wakeup;
s64 idle_time_ns, min_sleep_ns;
min_sleep_ns = genpd->states[state].power_off_latency_ns +
@@ -185,8 +190,9 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
* All subdomains have been powered off already at this point.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
- struct generic_pm_domain *sd = link->child;
- s64 sd_max_off_ns = sd->max_off_time_ns;
+ struct genpd_governor_data *cgd = link->child->gd;
+
+ s64 sd_max_off_ns = cgd ? cgd->max_off_time_ns : -1;
if (sd_max_off_ns < 0)
continue;
@@ -215,7 +221,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
* domain to turn off and on (that's how much time it will
* have to wait worst case).
*/
- td = &to_gpd_data(pdd)->td;
+ td = to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
/*
* Zero means "no suspend at all" and this runs only when all
@@ -244,7 +250,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
* time and the time needed to turn the domain on is the maximum
* theoretical time this domain can spend in the "off" state.
*/
- genpd->max_off_time_ns = min_off_time_ns -
+ genpd->gd->max_off_time_ns = min_off_time_ns -
genpd->states[state].power_on_latency_ns;
return true;
}
@@ -259,6 +265,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct genpd_governor_data *gd = genpd->gd;
int state_idx = genpd->state_count - 1;
struct gpd_link *link;
@@ -269,11 +276,11 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
* cannot be met.
*/
update_domain_next_wakeup(genpd, now);
- if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
+ if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (gd->next_wakeup != KTIME_MAX)) {
/* Let's find out the deepest domain idle state, the devices prefer */
while (state_idx >= 0) {
if (next_wakeup_allows_state(genpd, state_idx, now)) {
- genpd->max_off_time_changed = true;
+ gd->max_off_time_changed = true;
break;
}
state_idx--;
@@ -281,14 +288,14 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
if (state_idx < 0) {
state_idx = 0;
- genpd->cached_power_down_ok = false;
+ gd->cached_power_down_ok = false;
goto done;
}
}
- if (!genpd->max_off_time_changed) {
- genpd->state_idx = genpd->cached_power_down_state_idx;
- return genpd->cached_power_down_ok;
+ if (!gd->max_off_time_changed) {
+ genpd->state_idx = gd->cached_power_down_state_idx;
+ return gd->cached_power_down_ok;
}
/*
@@ -297,12 +304,16 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
* going to be called for any parent until this instance
* returns.
*/
- list_for_each_entry(link, &genpd->child_links, child_node)
- link->parent->max_off_time_changed = true;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct genpd_governor_data *pgd = link->parent->gd;
+
+ if (pgd)
+ pgd->max_off_time_changed = true;
+ }
- genpd->max_off_time_ns = -1;
- genpd->max_off_time_changed = false;
- genpd->cached_power_down_ok = true;
+ gd->max_off_time_ns = -1;
+ gd->max_off_time_changed = false;
+ gd->cached_power_down_ok = true;
/*
* Find a state to power down to, starting from the state
@@ -310,7 +321,7 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
*/
while (!__default_power_down_ok(pd, state_idx)) {
if (state_idx == 0) {
- genpd->cached_power_down_ok = false;
+ gd->cached_power_down_ok = false;
break;
}
state_idx--;
@@ -318,8 +329,8 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
done:
genpd->state_idx = state_idx;
- genpd->cached_power_down_state_idx = genpd->state_idx;
- return genpd->cached_power_down_ok;
+ gd->cached_power_down_state_idx = genpd->state_idx;
+ return gd->cached_power_down_ok;
}
static bool default_power_down_ok(struct dev_pm_domain *pd)
@@ -327,11 +338,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
return _default_power_down_ok(pd, ktime_get());
}
-static bool always_on_power_down_ok(struct dev_pm_domain *domain)
-{
- return false;
-}
-
#ifdef CONFIG_CPU_IDLE
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
{
@@ -401,6 +407,5 @@ struct dev_power_governor simple_qos_governor = {
* pm_genpd_gov_always_on - A governor implementing an always-on policy
*/
struct dev_power_governor pm_domain_always_on_gov = {
- .power_down_ok = always_on_power_down_ok,
.suspend_ok = default_suspend_ok,
};
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 04ea92cbd9cf..c50139207794 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -485,7 +485,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1568,7 +1568,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1855,7 +1855,7 @@ unlock:
device_unlock(dev);
if (ret < 0) {
- suspend_report_result(callback, ret);
+ suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
@@ -1960,10 +1960,10 @@ int dpm_suspend_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
-void __suspend_report_result(const char *function, void *fn, int ret)
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- pr_err("%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
@@ -2018,7 +2018,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
void device_pm_check_callbacks(struct device *dev)
{
- spin_lock_irq(&dev->power.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
@@ -2027,7 +2029,7 @@ void device_pm_check_callbacks(struct device *dev)
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
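
For reference, the resulting call pattern, mirroring dpm_run_callback() above. The three-argument suspend_report_result() assumes its wrapper macro was updated along with __suspend_report_result(), which this hunk implies but does not show:

	error = cb(dev);
	/* On failure this now emits a dev_err() line naming the device
	 * as well as the callback symbol (%pS) and the error code. */
	suspend_report_result(dev, cb, error);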
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2f3cce17219b..b52049098d4e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -263,7 +263,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
retval = -EINVAL;
else if (dev->power.disable_depth > 0)
retval = -EACCES;
- else if (atomic_read(&dev->power.usage_count) > 0)
+ else if (atomic_read(&dev->power.usage_count))
retval = -EAGAIN;
else if (!dev->power.ignore_children &&
atomic_read(&dev->power.child_count))
@@ -308,13 +308,10 @@ static int rpm_get_suppliers(struct device *dev)
/**
* pm_runtime_release_supplier - Drop references to device link's supplier.
* @link: Target device link.
- * @check_idle: Whether or not to check if the supplier device is idle.
*
- * Drop all runtime PM references associated with @link to its supplier device
- * and if @check_idle is set, check if that device is idle (and so it can be
- * suspended).
+ * Drop all runtime PM references associated with @link to its supplier device.
*/
-void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
+void pm_runtime_release_supplier(struct device_link *link)
{
struct device *supplier = link->supplier;
@@ -327,9 +324,6 @@ void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
while (refcount_dec_not_one(&link->rpm_active) &&
atomic_read(&supplier->power.usage_count) > 0)
pm_runtime_put_noidle(supplier);
-
- if (check_idle)
- pm_request_idle(supplier);
}
static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
@@ -337,8 +331,11 @@ static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
- pm_runtime_release_supplier(link, try_to_suspend);
+ device_links_read_lock_held()) {
+ pm_runtime_release_supplier(link);
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
+ }
}
static void rpm_put_suppliers(struct device *dev)
@@ -795,10 +792,13 @@ static int rpm_resume(struct device *dev, int rpmflags)
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
- if (dev->power.runtime_status == RPM_SUSPENDING)
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
dev->power.deferred_resume = true;
- else
+ if (rpmflags & RPM_NOWAIT)
+ retval = -EINPROGRESS;
+ } else {
retval = -EINPROGRESS;
+ }
goto out;
}
@@ -1039,13 +1039,33 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
+static int rpm_drop_usage_count(struct device *dev)
+{
+ int ret;
+
+ ret = atomic_sub_return(1, &dev->power.usage_count);
+ if (ret >= 0)
+ return ret;
+
+ /*
+ * Because rpm_resume() does not check the usage counter, it will resume
+ * the device even if the usage counter is 0 or negative, so it is
+ * sufficient to increment the usage counter here to reverse the change
+ * made above.
+ */
+ atomic_inc(&dev->power.usage_count);
+ dev_warn(dev, "Runtime PM usage count underflow!\n");
+ return -EINVAL;
+}
+
/**
* __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out an idle
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out an idle
* notification, either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
@@ -1057,7 +1077,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ retval = rpm_drop_usage_count(dev);
+ if (retval < 0) {
+ return retval;
+ } else if (retval > 0) {
trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
}
@@ -1079,7 +1102,8 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out a suspend,
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out a suspend,
* either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
@@ -1091,7 +1115,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ retval = rpm_drop_usage_count(dev);
+ if (retval < 0) {
+ return retval;
+ } else if (retval > 0) {
trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
}
@@ -1210,12 +1237,13 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
struct device *parent = dev->parent;
bool notify_parent = false;
+ unsigned long flags;
int error = 0;
if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
return -EINVAL;
- spin_lock_irq(&dev->power.lock);
+ spin_lock_irqsave(&dev->power.lock, flags);
/*
* Prevent PM-runtime from being enabled for the device or return an
@@ -1226,7 +1254,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
else
error = -EAGAIN;
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
if (error)
return error;
@@ -1247,7 +1275,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
device_links_read_unlock(idx);
}
- spin_lock_irq(&dev->power.lock);
+ spin_lock_irqsave(&dev->power.lock, flags);
if (dev->power.runtime_status == status || !parent)
goto out_set;
@@ -1288,7 +1316,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
dev->power.runtime_error = 0;
out:
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
if (notify_parent)
pm_request_idle(parent);
@@ -1476,11 +1504,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_disable_action(void *data)
{
+ pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
/**
* devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
* @dev: Device to handle.
*/
int devm_pm_runtime_enable(struct device *dev)
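
A sketch of the pairing that NOTE enables; the 100 ms delay is illustrative. Because the devres action now undoes autosuspend before disabling runtime PM, probe-time setup needs no matching cleanup in .remove():

	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);

	ret = devm_pm_runtime_enable(dev);	/* both undone on detach */
	if (ret)
		return ret;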
@@ -1522,14 +1555,17 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
*/
void pm_runtime_allow(struct device *dev)
{
+ int ret;
+
spin_lock_irq(&dev->power.lock);
if (dev->power.runtime_auto)
goto out;
dev->power.runtime_auto = true;
- if (atomic_dec_and_test(&dev->power.usage_count))
+ ret = rpm_drop_usage_count(dev);
+ if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
- else
+ else if (ret > 0)
trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
out:
@@ -1735,7 +1771,6 @@ void pm_runtime_get_suppliers(struct device *dev)
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
- refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1755,19 +1790,8 @@ void pm_runtime_put_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
if (link->supplier_preactivated) {
- bool put;
-
link->supplier_preactivated = false;
-
- spin_lock_irq(&dev->power.lock);
-
- put = pm_runtime_status_suspended(dev) &&
- refcount_dec_not_one(&link->rpm_active);
-
- spin_unlock_irq(&dev->power.lock);
-
- if (put)
- pm_runtime_put(link->supplier);
+ pm_runtime_put(link->supplier);
}
device_links_read_unlock(idx);
@@ -1802,7 +1826,8 @@ void pm_runtime_drop_link(struct device_link *link)
return;
pm_runtime_drop_link_count(link->consumer);
- pm_runtime_release_supplier(link, true);
+ pm_runtime_release_supplier(link);
+ pm_request_idle(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
@@ -1840,10 +1865,13 @@ int pm_runtime_force_suspend(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
if (ret)
goto err;
+ dev_pm_enable_wake_irq_complete(dev);
+
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
@@ -1860,6 +1888,7 @@ int pm_runtime_force_suspend(struct device *dev)
return 0;
err:
+ dev_pm_disable_wake_irq_check(dev, true);
pm_runtime_enable(dev);
return ret;
}
@@ -1893,9 +1922,11 @@ int pm_runtime_force_resume(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
goto out;
}
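
To illustrate what rpm_drop_usage_count() catches, a hedged sketch of a balanced get/put followed by an unbalanced one:

	pm_runtime_get_sync(dev);	/* usage count 0 -> 1 */
	/* ... access the hardware ... */
	pm_runtime_put(dev);		/* 1 -> 0, may idle the device */

	/* An extra put no longer drives the count negative: it warns
	 * "Runtime PM usage count underflow!" and returns -EINVAL. */
	pm_runtime_put(dev);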
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 94665037f4a3..72b7a92337b1 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -120,7 +120,11 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
- mc146818_get_time(&time);
+ if (mc146818_get_time(&time) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+
pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0004db4a9d3b..d487a6bac630 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
*
* Enables wakeirq conditionally. We need to enable wake-up interrupt
* lazily on the first rpm_suspend(). This is needed as the consumer device
- * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
* otherwise try to disable already disabled wakeirq. The wake-up interrupt
* starts disabled with IRQ_NOAUTOEN set.
*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 99bda0da23a8..7cc0c0cf8eaa 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state;
bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
-unsigned int pm_wakeup_irq __read_mostly;
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
@@ -500,36 +501,6 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
/**
- * device_init_wakeup - Device wakeup initialization.
- * @dev: Device to handle.
- * @enable: Whether or not to enable @dev as a wakeup device.
- *
- * By default, most devices should leave wakeup disabled. The exceptions are
- * devices that everyone expects to be wakeup sources: keyboards, power buttons,
- * possibly network interfaces, etc. Also, devices that don't generate their
- * own wakeup requests but merely forward requests from one bus to another
- * (like PCI bridges) should have wakeup enabled by default.
- */
-int device_init_wakeup(struct device *dev, bool enable)
-{
- int ret = 0;
-
- if (!dev)
- return -EINVAL;
-
- if (enable) {
- device_set_wakeup_capable(dev, true);
- ret = device_wakeup_enable(dev);
- } else {
- device_wakeup_disable(dev);
- device_set_wakeup_capable(dev, false);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_init_wakeup);
-
-/**
* device_set_wakeup_enable - Enable or disable a device to wake up the system.
* @dev: Device to handle.
* @enable: enable/disable flag
@@ -586,7 +557,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
- * core of the event by incrementing the counter of of wakeup events being
+ * core of the event by incrementing the counter of wakeup events being
* processed.
*/
static void wakeup_source_activate(struct wakeup_source *ws)
@@ -732,7 +703,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
/*
* Increment the counter of registered wakeup events and decrement the
- * couter of wakeup events in progress simultaneously.
+ * counter of wakeup events in progress simultaneously.
*/
cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
trace_wakeup_source_deactivate(ws->name, cec);
@@ -929,6 +900,7 @@ bool pm_wakeup_pending(void)
return ret || atomic_read(&pm_abort_suspend) > 0;
}
+EXPORT_SYMBOL_GPL(pm_wakeup_pending);
void pm_system_wakeup(void)
{
@@ -942,19 +914,47 @@ void pm_system_cancel_wakeup(void)
atomic_dec_if_positive(&pm_abort_suspend);
}
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(unsigned int irq_number)
{
- pm_wakeup_irq = 0;
- if (reset)
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
{
- if (pm_wakeup_irq == 0) {
- pm_wakeup_irq = irq_number;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
pm_system_wakeup();
- }
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
}
/**
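
pm_wakeup_irq is now an accessor function rather than an exported variable, and two wakeup IRQs are latched per cycle so that clearing one specific IRQ preserves the other. A sketch of the resulting calling convention, drawn from the code above:

	unsigned int irq;

	pm_wakeup_clear(0);	/* start of cycle: drop both slots and
				 * reset the abort-suspend counter */

	/* ... system suspends, wakeup interrupts are latched ... */

	irq = pm_wakeup_irq();	/* first latched wakeup IRQ, or 0 */

	pm_wakeup_clear(irq);	/* clearing a specific IRQ shifts the
				 * second latched one into slot 0 */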
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 5379eae478b1..2a5a37fcd998 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -17,7 +17,7 @@
#include <linux/property.h>
#include <linux/phy.h>
-struct fwnode_handle *dev_fwnode(struct device *dev)
+struct fwnode_handle *dev_fwnode(const struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode;
@@ -47,12 +47,14 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
{
bool ret;
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
ret = fwnode_call_bool_op(fwnode, property_present, propname);
- if (ret == false && !IS_ERR_OR_NULL(fwnode) &&
- !IS_ERR_OR_NULL(fwnode->secondary))
- ret = fwnode_call_bool_op(fwnode->secondary, property_present,
- propname);
- return ret;
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_present, propname);
}
EXPORT_SYMBOL_GPL(fwnode_property_present);
@@ -66,6 +68,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);
* Function reads an array of u8 properties with @propname from the device
* firmware description and stores them to @val if found.
*
+ * It's recommended to call device_property_count_u8() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
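[Editor's note: a minimal sketch of the count-then-read pattern recommended above; "vendor,thresholds" and foo_read_thresholds() are illustrative:]

	static int foo_read_thresholds(struct device *dev)
	{
		u8 *vals;
		int n, ret;

		n = device_property_count_u8(dev, "vendor,thresholds");
		if (n <= 0)
			return n ? n : -ENODATA;

		vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
		if (!vals)
			return -ENOMEM;

		ret = device_property_read_u8_array(dev, "vendor,thresholds", vals, n);
		/* ... use vals ... */
		kfree(vals);
		return ret;
	}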
@@ -91,6 +96,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);
* Function reads an array of u16 properties with @propname from the device
* firmware description and stores them to @val if found.
*
+ * It's recommended to call device_property_count_u16() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
@@ -116,6 +124,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);
* Function reads an array of u32 properties with @propname from the device
* firmware description and stores them to @val if found.
*
+ * It's recommended to call device_property_count_u32() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
@@ -141,6 +152,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);
* Function reads an array of u64 properties with @propname from the device
* firmware description and stores them to @val if found.
*
+ * It's recommended to call device_property_count_u64() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
@@ -166,6 +180,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);
* Function reads an array of string properties with @propname from the device
* firmware description and stores them to @val if found.
*
+ * It's recommended to call device_property_string_array_count() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values read on success if @val is non-NULL,
* number of values available on success if @val is NULL,
* %-EINVAL if given arguments are not valid,
@@ -212,7 +229,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
* Find a given string in a string array and, if found, return its index.
*
- * Return: %0 if the property was found (success),
+ * Return: index, starting from %0, if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of strings,
@@ -232,15 +249,16 @@ static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
{
int ret;
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
ret = fwnode_call_int_op(fwnode, property_read_int_array, propname,
elem_size, val, nval);
- if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
- !IS_ERR_OR_NULL(fwnode->secondary))
- ret = fwnode_call_int_op(
- fwnode->secondary, property_read_int_array, propname,
- elem_size, val, nval);
+ if (ret != -EINVAL)
+ return ret;
- return ret;
+ return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname,
+ elem_size, val, nval);
}
/**
@@ -253,6 +271,9 @@ static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
* Read an array of u8 properties with @propname from @fwnode and stores them to
* @val if found.
*
+ * It's recommended to call fwnode_property_count_u8() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
@@ -279,6 +300,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
* Read an array of u16 properties with @propname from @fwnode and store them to
* @val if found.
*
+ * It's recommended to call fwnode_property_count_u16() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
@@ -305,6 +329,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
* Read an array of u32 properties with @propname from @fwnode and store them to
* @val if found.
*
+ * It's recommended to call fwnode_property_count_u32() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
@@ -331,6 +358,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
* Read an array of u64 properties with @propname from @fwnode and store them to
* @val if found.
*
+ * It's recommended to call fwnode_property_count_u64() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
@@ -357,6 +387,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
* Read a string list property @propname from the given firmware node and store
* them to @val if found.
*
+ * It's recommended to call fwnode_property_string_array_count() instead of calling
+ * this function with @val set to %NULL and @nval set to 0.
+ *
* Return: number of values read on success if @val is non-NULL,
* number of values available on success if @val is NULL,
* %-EINVAL if given arguments are not valid,
@@ -371,14 +404,16 @@ int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
{
int ret;
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
ret = fwnode_call_int_op(fwnode, property_read_string_array, propname,
val, nval);
- if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
- !IS_ERR_OR_NULL(fwnode->secondary))
- ret = fwnode_call_int_op(fwnode->secondary,
- property_read_string_array, propname,
- val, nval);
- return ret;
+ if (ret != -EINVAL)
+ return ret;
+
+ return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname,
+ val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
@@ -415,7 +450,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string);
* Find a given string in a string array and, if found, return its index.
*
- * Return: %0 if the property was found (success),
+ * Return: index, starting from %0, if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of strings,
@@ -478,7 +513,20 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
- return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ int ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -ENOENT;
+
+ ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+ if (ret == 0)
+ return ret;
+
+ if (IS_ERR_OR_NULL(fwnode->secondary))
+ return ret;
+
+ return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop,
nargs, index, args);
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
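[Editor's note: a sketch of fetching a reference with arguments through this helper; the property names are illustrative:]

	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, "vendor,phys",
						 "#phy-cells", 0, 0, &args);
	if (ret)
		return ret;
	/* args.fwnode holds a reference on the target node; drop it when done. */
	fwnode_handle_put(args.fwnode);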
@@ -578,17 +626,17 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
*/
struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
{
+ struct fwnode_handle *parent;
struct device *dev;
- fwnode_handle_get(fwnode);
- do {
- fwnode = fwnode_get_next_parent(fwnode);
- if (!fwnode)
- return NULL;
- dev = get_dev_from_fwnode(fwnode);
- } while (!dev);
- fwnode_handle_put(fwnode);
- return dev;
+ fwnode_for_each_parent_node(fwnode, parent) {
+ dev = get_dev_from_fwnode(parent);
+ if (dev) {
+ fwnode_handle_put(parent);
+ return dev;
+ }
+ }
+ return NULL;
}
/**
@@ -599,13 +647,11 @@ struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
*/
unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
- struct fwnode_handle *__fwnode;
- unsigned int count;
-
- __fwnode = fwnode_get_parent(fwnode);
+ struct fwnode_handle *parent;
+ unsigned int count = 0;
- for (count = 0; __fwnode; count++)
- __fwnode = fwnode_get_next_parent(__fwnode);
+ fwnode_for_each_parent_node(fwnode, parent)
+ count++;
return count;
}
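[Editor's note: with the fwnode_for_each_parent_node() based helpers above, the depth of the topmost parent equals the parent count; a small sketch:]

	unsigned int depth = fwnode_count_parents(fwnode);
	struct fwnode_handle *root;

	root = fwnode_get_nth_parent(fwnode, depth); /* the topmost node */
	if (root) {
		/* ... */
		fwnode_handle_put(root); /* returned with a reference held */
	}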
@@ -626,40 +672,43 @@ EXPORT_SYMBOL_GPL(fwnode_count_parents);
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
unsigned int depth)
{
- unsigned int i;
+ struct fwnode_handle *parent;
- fwnode_handle_get(fwnode);
+ if (depth == 0)
+ return fwnode_handle_get(fwnode);
- for (i = 0; i < depth && fwnode; i++)
- fwnode = fwnode_get_next_parent(fwnode);
-
- return fwnode;
+ fwnode_for_each_parent_node(fwnode, parent) {
+ if (--depth == 0)
+ return parent;
+ }
+ return NULL;
}
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
- * fwnode_is_ancestor_of - Test if @test_ancestor is ancestor of @test_child
- * @test_ancestor: Firmware which is tested for being an ancestor
- * @test_child: Firmware which is tested for being the child
+ * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
+ * @ancestor: Firmware which is tested for being an ancestor
+ * @child: Firmware which is tested for being the child
*
* A node is considered an ancestor of itself too.
*
- * Returns true if @test_ancestor is an ancestor of @test_child.
- * Otherwise, returns false.
+ * Returns true if @ancestor is an ancestor of @child. Otherwise, returns false.
*/
-bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor,
- struct fwnode_handle *test_child)
+bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle *child)
{
- if (!test_ancestor)
+ struct fwnode_handle *parent;
+
+ if (IS_ERR_OR_NULL(ancestor))
return false;
- fwnode_handle_get(test_child);
- while (test_child) {
- if (test_child == test_ancestor) {
- fwnode_handle_put(test_child);
+ if (child == ancestor)
+ return true;
+
+ fwnode_for_each_parent_node(child, parent) {
+ if (parent == ancestor) {
+ fwnode_handle_put(parent);
return true;
}
- test_child = fwnode_get_next_parent(test_child);
}
return false;
}
@@ -689,7 +738,7 @@ fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
{
struct fwnode_handle *next_child = child;
- if (!fwnode)
+ if (IS_ERR_OR_NULL(fwnode))
return NULL;
do {
@@ -713,16 +762,16 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
const struct fwnode_handle *fwnode = dev_fwnode(dev);
struct fwnode_handle *next;
+ if (IS_ERR_OR_NULL(fwnode))
+ return NULL;
+
/* Try to find a child in primary fwnode */
next = fwnode_get_next_child_node(fwnode, child);
if (next)
return next;
/* When no more children in primary, continue with secondary */
- if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
- next = fwnode_get_next_child_node(fwnode->secondary, child);
-
- return next;
+ return fwnode_get_next_child_node(fwnode->secondary, child);
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
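[Editor's note: callers normally use the device_for_each_child_node() wrapper, which repeatedly calls the function above and so now walks primary then secondary children; a sketch:]

	struct fwnode_handle *child;
	unsigned int n = 0;

	device_for_each_child_node(dev, child)
		n++; /* counts children of both the primary and secondary fwnode */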
@@ -789,6 +838,9 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
*/
bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
if (!fwnode_has_op(fwnode, device_is_available))
return true;
@@ -814,33 +866,16 @@ EXPORT_SYMBOL_GPL(device_get_child_node_count);
bool device_dma_supported(struct device *dev)
{
- const struct fwnode_handle *fwnode = dev_fwnode(dev);
-
- /* For DT, this is always supported.
- * For ACPI, this depends on CCA, which
- * is determined by the acpi_dma_supported().
- */
- if (is_of_node(fwnode))
- return true;
-
- return acpi_dma_supported(to_acpi_device_node(fwnode));
+ return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported);
}
EXPORT_SYMBOL_GPL(device_dma_supported);
enum dev_dma_attr device_get_dma_attr(struct device *dev)
{
- const struct fwnode_handle *fwnode = dev_fwnode(dev);
- enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
-
- if (is_of_node(fwnode)) {
- if (of_dma_is_coherent(to_of_node(fwnode)))
- attr = DEV_DMA_COHERENT;
- else
- attr = DEV_DMA_NON_COHERENT;
- } else
- attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
+ if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr))
+ return DEV_DMA_NOT_SUPPORTED;
- return attr;
+ return fwnode_call_int_op(dev_fwnode(dev), device_get_dma_attr);
}
EXPORT_SYMBOL_GPL(device_get_dma_attr);
@@ -887,6 +922,19 @@ int device_get_phy_mode(struct device *dev)
EXPORT_SYMBOL_GPL(device_get_phy_mode);
/**
+ * fwnode_iomap - Maps the memory mapped IO for a given fwnode
+ * @fwnode: Pointer to the firmware node
+ * @index: Index of the IO range
+ *
+ * Returns a pointer to the mapped memory.
+ */
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
+{
+ return fwnode_call_ptr_op(fwnode, iomap, index);
+}
+EXPORT_SYMBOL(fwnode_iomap);
+
+/**
* fwnode_irq_get - Get IRQ directly from a fwnode
* @fwnode: Pointer to the firmware node
* @index: Zero-based index of the IRQ
@@ -896,19 +944,38 @@ EXPORT_SYMBOL_GPL(device_get_phy_mode);
*/
int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
{
- struct resource res;
- int ret;
+ return fwnode_call_int_op(fwnode, irq_get, index);
+}
+EXPORT_SYMBOL(fwnode_irq_get);
- if (is_of_node(fwnode))
- return of_irq_get(to_of_node(fwnode), index);
+/**
+ * fwnode_irq_get_byname - Get IRQ from a fwnode using its name
+ * @fwnode: Pointer to the firmware node
+ * @name: IRQ name
+ *
+ * Description:
+ * Find a match to the string @name in the 'interrupt-names' string array
+ * in _DSD for ACPI, or of_node for Device Tree. Then get the Linux IRQ
+ * number of the IRQ resource corresponding to the index of the matched
+ * string.
+ *
+ * Return:
+ * Linux IRQ number on success, or negative errno otherwise.
+ */
+int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name)
+{
+ int index;
- ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
- if (ret)
- return ret;
+ if (!name)
+ return -EINVAL;
- return res.start;
+ index = fwnode_property_match_string(fwnode, "interrupt-names", name);
+ if (index < 0)
+ return index;
+
+ return fwnode_irq_get(fwnode, index);
}
-EXPORT_SYMBOL(fwnode_irq_get);
+EXPORT_SYMBOL(fwnode_irq_get_byname);
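[Editor's note: a usage sketch of the new helper; "wakeup" must be one of the strings in the node's interrupt-names array:]

	int irq;

	irq = fwnode_irq_get_byname(dev_fwnode(dev), "wakeup");
	if (irq < 0)
		return irq; /* -EINVAL for a NULL name, or the lookup error */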
/**
* fwnode_graph_get_next_endpoint - Get next endpoint firmware node
@@ -934,14 +1001,14 @@ fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
parent = fwnode_graph_get_port_parent(prev);
else
parent = fwnode;
+ if (IS_ERR_OR_NULL(parent))
+ return NULL;
ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
+ if (ep)
+ return ep;
- if (IS_ERR_OR_NULL(ep) &&
- !IS_ERR_OR_NULL(parent) && !IS_ERR_OR_NULL(parent->secondary))
- ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
-
- return ep;
+ return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
@@ -1133,21 +1200,29 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
-const void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(const struct device *dev)
{
return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
EXPORT_SYMBOL_GPL(device_get_match_data);
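[Editor's note: typical use of device_get_match_data(), which now takes a const device; struct foo_cfg and the compatible string are illustrative:]

	static const struct foo_cfg foo_a_cfg = { .num_ports = 2 };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-a", .data = &foo_a_cfg },
		{ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		const struct foo_cfg *cfg = device_get_match_data(&pdev->dev);

		if (!cfg)
			return -ENODEV;
		/* ... */
		return 0;
	}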
-static void *
-fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
+static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
{
struct fwnode_handle *node;
struct fwnode_handle *ep;
+ unsigned int count = 0;
void *ret;
fwnode_graph_for_each_endpoint(fwnode, ep) {
+ if (matches && count >= matches_len) {
+ fwnode_handle_put(ep);
+ break;
+ }
+
node = fwnode_graph_get_remote_port_parent(ep);
if (!fwnode_device_is_available(node)) {
fwnode_handle_put(node);
@@ -1157,33 +1232,43 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
ret = match(node, con_id, data);
fwnode_handle_put(node);
if (ret) {
- fwnode_handle_put(ep);
- return ret;
+ if (matches)
+ matches[count] = ret;
+ count++;
}
}
- return NULL;
+ return count;
}
-static void *
-fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
+static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
{
struct fwnode_handle *node;
+ unsigned int count = 0;
+ unsigned int i;
void *ret;
- int i;
for (i = 0; ; i++) {
+ if (matches && count >= matches_len)
+ break;
+
node = fwnode_find_reference(fwnode, con_id, i);
if (IS_ERR(node))
break;
ret = match(node, NULL, data);
fwnode_handle_put(node);
- if (ret)
- return ret;
+ if (ret) {
+ if (matches)
+ matches[count] = ret;
+ count++;
+ }
}
- return NULL;
+ return count;
}
/**
@@ -1201,15 +1286,61 @@ void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match)
{
+ unsigned int count;
void *ret;
if (!fwnode || !match)
return NULL;
- ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
- if (ret)
+ count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ if (count)
return ret;
- return fwnode_devcon_match(fwnode, con_id, data, match);
+ count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ return count ? ret : NULL;
}
EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
+
+/**
+ * fwnode_connection_find_matches - Find connections from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ * @matches: (Optional) array of pointers to fill with matches
+ * @matches_len: Length of @matches
+ *
+ * Find up to @matches_len connections with unique identifier @con_id between
+ * @fwnode and other device nodes. @match will be used to convert the
+ * connection description to data the caller is expecting to be returned
+ * through the @matches array.
+ * If @matches is NULL, @matches_len is ignored and the total number of resolved
+ * matches is returned.
+ *
+ * Return: Number of matches resolved, or negative errno.
+ */
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len)
+{
+ unsigned int count_graph;
+ unsigned int count_ref;
+
+ if (!fwnode || !match)
+ return -EINVAL;
+
+ count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ if (matches) {
+ matches += count_graph;
+ matches_len -= count_graph;
+ }
+
+ count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ return count_graph + count_ref;
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
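[Editor's note: the NULL-@matches behavior documented above enables a count-then-fill pattern; a sketch, with "vendor,con" and the foo_match callback assumed:]

	void **matches;
	int n;

	n = fwnode_connection_find_matches(fwnode, "vendor,con", NULL,
					   foo_match, NULL, 0);
	if (n <= 0)
		return n;

	matches = kcalloc(n, sizeof(*matches), GFP_KERNEL);
	if (!matches)
		return -ENOMEM;

	n = fwnode_connection_find_matches(fwnode, "vendor,con", NULL,
					   foo_match, matches, n);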
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b1905916f7af..da8996e7a1f1 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -31,6 +31,7 @@ struct regmap_format {
size_t buf_size;
size_t reg_bytes;
size_t pad_bytes;
+ size_t reg_downshift;
size_t val_bytes;
void (*format_write)(struct regmap *map,
unsigned int reg, unsigned int val);
@@ -62,6 +63,7 @@ struct regmap {
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
gfp_t alloc_flags;
+ unsigned int reg_base;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
@@ -108,6 +110,10 @@ struct regmap {
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
int (*reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
bool defer_caching;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f2469d3435ca..362e043e26d8 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -133,6 +133,12 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
return -EINVAL;
}
+ if (config->num_reg_defaults && !config->reg_defaults) {
+ dev_err(map->dev,
+ "Register defaults number are set without the reg!\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < config->num_reg_defaults; i++)
if (config->reg_defaults[i].reg % map->reg_stride)
return -EINVAL;
@@ -183,8 +189,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
return 0;
}
- if (!map->max_register)
- map->max_register = map->num_reg_defaults_raw;
+ if (!map->max_register && map->num_reg_defaults_raw)
+ map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
@@ -495,7 +501,8 @@ EXPORT_SYMBOL_GPL(regcache_drop_region);
void regcache_cache_only(struct regmap *map, bool enable)
{
map->lock(map->lock_arg);
- WARN_ON(map->cache_bypass && enable);
+ WARN_ON(map->cache_type != REGCACHE_NONE &&
+ map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map, enable);
map->unlock(map->lock_arg);
@@ -531,7 +538,7 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
* @enable: flag if changes should not be written to the cache
*
* When a register map is marked with the cache bypass option, writes
- * to the register map API will only update the hardware and not the
+ * to the register map API will only update the hardware and not
* the cache directly. This is useful when syncing the cache back to
* the hardware.
*/
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index 1578fb506683..0328b0b34284 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -40,7 +40,7 @@ static int regmap_i3c_read(void *context,
return i3c_device_do_priv_xfers(i3c, xfers, 2);
}
-static struct regmap_bus regmap_i3c = {
+static const struct regmap_bus regmap_i3c = {
.write = regmap_i3c_write,
.read = regmap_i3c_read,
};
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d2656581a608..4ef9488d05cd 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -30,6 +30,9 @@ struct regmap_irq_chip_data {
int irq;
int wake_count;
+ unsigned int mask_base;
+ unsigned int unmask_base;
+
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
@@ -39,33 +42,15 @@ struct regmap_irq_chip_data {
unsigned int *type_buf;
unsigned int *type_buf_def;
unsigned int **virt_buf;
+ unsigned int **config_buf;
unsigned int irq_reg_stride;
- unsigned int type_reg_stride;
- bool clear_status:1;
-};
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
-static int sub_irq_reg(struct regmap_irq_chip_data *data,
- unsigned int base_reg, int i)
-{
- const struct regmap_irq_chip *chip = data->chip;
- struct regmap *map = data->map;
- struct regmap_irq_sub_irq_map *subreg;
- unsigned int offset;
- int reg = 0;
-
- if (!chip->sub_reg_offsets || !chip->not_fixed_stride) {
- /* Assume linear mapping */
- reg = base_reg + (i * map->reg_stride * data->irq_reg_stride);
- } else {
- subreg = &chip->sub_reg_offsets[i];
- offset = subreg->offset[0];
- reg = base_reg + offset;
- }
-
- return reg;
-}
+ unsigned int clear_status:1;
+};
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
@@ -74,21 +59,25 @@ struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
return &data->chip->irqs[irq];
}
-static void regmap_irq_lock(struct irq_data *data)
+static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
- struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = data->map;
- mutex_lock(&d->lock);
+ /*
+ * While it's possible that a user-defined ->get_irq_reg() callback
+ * might be linear enough to support bulk reads, most of the time it won't.
+ * Therefore only allow them if the default callback is being used.
+ */
+ return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
+ data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
+ !map->use_single_read;
}
-static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
- unsigned int reg, unsigned int mask,
- unsigned int val)
+static void regmap_irq_lock(struct irq_data *data)
{
- if (d->chip->mask_writeonly)
- return regmap_write_bits(d->map, reg, mask, val);
- else
- return regmap_update_bits(d->map, reg, mask, val);
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->lock);
}
static void regmap_irq_sync_unlock(struct irq_data *data)
@@ -97,7 +86,6 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
struct regmap *map = d->map;
int i, j, ret;
u32 reg;
- u32 unmask_offset;
u32 val;
if (d->chip->runtime_pm) {
@@ -109,7 +97,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (d->clear_status) {
for (i = 0; i < d->chip->num_regs; i++) {
- reg = sub_irq_reg(d, d->chip->status_base, i);
+ reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &val);
if (ret)
@@ -126,44 +114,32 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
- if (!d->chip->mask_base)
- continue;
+ if (d->mask_base) {
+ reg = d->get_irq_reg(d, d->mask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
+ reg);
+ }
- reg = sub_irq_reg(d, d->chip->mask_base, i);
- if (d->chip->mask_invert) {
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf_def[i], ~d->mask_buf[i]);
- } else if (d->chip->unmask_base) {
- /* set mask with mask_base register */
- ret = regmap_irq_update_bits(d, reg,
+ if (d->unmask_base) {
+ reg = d->get_irq_reg(d, d->unmask_base, i);
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], ~d->mask_buf[i]);
- if (ret < 0)
- dev_err(d->map->dev,
- "Failed to sync unmasks in %x\n",
+ if (ret)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
- unmask_offset = d->chip->unmask_base -
- d->chip->mask_base;
- /* clear mask with unmask_base register */
- ret = regmap_irq_update_bits(d,
- reg + unmask_offset,
- d->mask_buf_def[i],
- d->mask_buf[i]);
- } else {
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf_def[i], d->mask_buf[i]);
}
- if (ret != 0)
- dev_err(d->map->dev, "Failed to sync masks in %x\n",
- reg);
- reg = sub_irq_reg(d, d->chip->wake_base, i);
+ reg = d->get_irq_reg(d, d->chip->wake_base, i);
if (d->wake_buf) {
if (d->chip->wake_invert)
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
~d->wake_buf[i]);
else
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->wake_buf[i]);
if (ret != 0)
@@ -180,7 +156,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* it'll be ignored in irq handler, then may introduce irq storm
*/
if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
- reg = sub_irq_reg(d, d->chip->ack_base, i);
+ reg = d->get_irq_reg(d, d->chip->ack_base, i);
/* some chips ack by write 0 */
if (d->chip->ack_invert)
@@ -189,11 +165,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
ret = regmap_write(map, reg, d->mask_buf[i]);
if (d->chip->clear_ack) {
if (d->chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- d->mask_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~d->mask_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
@@ -206,12 +180,12 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
for (i = 0; i < d->chip->num_type_reg; i++) {
if (!d->type_buf_def[i])
continue;
- reg = sub_irq_reg(d, d->chip->type_base, i);
+ reg = d->get_irq_reg(d, d->chip->type_base, i);
if (d->chip->type_invert)
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->type_buf_def[i], ~d->type_buf[i]);
else
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->type_buf_def[i], d->type_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to sync type in %x\n",
@@ -222,8 +196,8 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (d->chip->num_virt_regs) {
for (i = 0; i < d->chip->num_virt_regs; i++) {
for (j = 0; j < d->chip->num_regs; j++) {
- reg = sub_irq_reg(d, d->chip->virt_reg_base[i],
- j);
+ reg = d->get_irq_reg(d, d->chip->virt_reg_base[i],
+ j);
ret = regmap_write(map, reg, d->virt_buf[i][j]);
if (ret != 0)
dev_err(d->map->dev,
@@ -233,6 +207,17 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
}
}
+ for (i = 0; i < d->chip->num_config_bases; i++) {
+ for (j = 0; j < d->chip->num_config_regs; j++) {
+ reg = d->get_irq_reg(d, d->chip->config_base[i], j);
+ ret = regmap_write(map, reg, d->config_buf[i][j]);
+ if (ret)
+ dev_err(d->map->dev,
+ "Failed to write config %x: %d\n",
+ reg, ret);
+ }
+ }
+
if (d->chip->runtime_pm)
pm_runtime_put(map->dev);
@@ -254,30 +239,28 @@ static void regmap_irq_enable(struct irq_data *data)
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- unsigned int mask, type;
-
- type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
+ unsigned int reg = irq_data->reg_offset / map->reg_stride;
+ unsigned int mask;
/*
* The type_in_mask flag means that the underlying hardware uses
- * separate mask bits for rising and falling edge interrupts, but
- * we want to make them into a single virtual interrupt with
- * configurable edge.
+ * separate mask bits for each interrupt trigger type, but we want
+ * to have a single logical interrupt with a configurable type.
*
- * If the interrupt we're enabling defines the falling or rising
- * masks then instead of using the regular mask bits for this
- * interrupt, use the value previously written to the type buffer
- * at the corresponding offset in regmap_irq_set_type().
+ * If the interrupt we're enabling defines any supported types
+ * then instead of using the regular mask bits for this interrupt,
+ * use the value previously written to the type buffer at the
+ * corresponding offset in regmap_irq_set_type().
*/
- if (d->chip->type_in_mask && type)
- mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
+ if (d->chip->type_in_mask && irq_data->type.types_supported)
+ mask = d->type_buf[reg] & irq_data->mask;
else
mask = irq_data->mask;
if (d->chip->clear_on_unmask)
d->clear_status = true;
- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
+ d->mask_buf[reg] &= ~mask;
}
static void regmap_irq_disable(struct irq_data *data)
@@ -294,7 +277,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- int reg;
+ int reg, ret;
const struct regmap_irq_type *t = &irq_data->type;
if ((t->types_supported & type) != type)
@@ -334,9 +317,19 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
- if (d->chip->set_type_virt)
- return d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
- reg);
+ if (d->chip->set_type_virt) {
+ ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
+ reg);
+ if (ret)
+ return ret;
+ }
+
+ if (d->chip->set_type_config) {
+ ret = d->chip->set_type_config(d->config_buf, type,
+ irq_data, reg);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -377,17 +370,21 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
struct regmap_irq_sub_irq_map *subreg;
+ unsigned int reg;
int i, ret = 0;
if (!chip->sub_reg_offsets) {
- /* Assume linear mapping */
- ret = regmap_read(map, chip->status_base +
- (b * map->reg_stride * data->irq_reg_stride),
- &data->status_buf[b]);
+ reg = data->get_irq_reg(data, chip->status_base, b);
+ ret = regmap_read(map, reg, &data->status_buf[b]);
} else {
+ /*
+ * Note we can't use ->get_irq_reg() here because the offsets
+ * in 'subreg' are *not* interchangeable with indices.
+ */
subreg = &chip->sub_reg_offsets[b];
for (i = 0; i < subreg->num_regs; i++) {
unsigned int offset = subreg->offset[i];
+ unsigned int index = offset / map->reg_stride;
if (chip->not_fixed_stride)
ret = regmap_read(map,
@@ -396,7 +393,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
else
ret = regmap_read(map,
chip->status_base + offset,
- &data->status_buf[offset]);
+ &data->status_buf[index]);
if (ret)
break;
@@ -449,10 +446,18 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
* sake of simplicity. and add bulk reads only if needed
*/
for (i = 0; i < chip->num_main_regs; i++) {
- ret = regmap_read(map, chip->main_status +
- (i * map->reg_stride
- * data->irq_reg_stride),
- &data->main_status_buf[i]);
+ /*
+ * For not_fixed_stride, don't use ->get_irq_reg().
+ * It would produce an incorrect result.
+ */
+ if (data->chip->not_fixed_stride)
+ reg = chip->main_status +
+ i * map->reg_stride * data->irq_reg_stride;
+ else
+ reg = data->get_irq_reg(data,
+ chip->main_status, i);
+
+ ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
dev_err(map->dev,
"Failed to read IRQ status %d\n",
@@ -481,8 +486,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
}
- } else if (!map->use_single_read && map->reg_stride == 1 &&
- data->irq_reg_stride == 1) {
+ } else if (regmap_irq_can_bulk_read_status(data)) {
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
@@ -518,7 +522,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
} else {
for (i = 0; i < data->chip->num_regs; i++) {
- unsigned int reg = sub_irq_reg(data,
+ unsigned int reg = data->get_irq_reg(data,
data->chip->status_base, i);
ret = regmap_read(map, reg, &data->status_buf[i]);
@@ -537,7 +541,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
/*
* Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
+ * there is no race between handling and acknowledging the
* interrupt. We assume that typically few of the interrupts
* will fire simultaneously so don't worry about overhead from
* doing a write per register.
@@ -546,7 +550,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
- reg = sub_irq_reg(data, data->chip->ack_base, i);
+ reg = data->get_irq_reg(data, data->chip->ack_base, i);
if (chip->ack_invert)
ret = regmap_write(map, reg,
@@ -556,11 +560,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- data->status_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~data->status_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -609,6 +611,91 @@ static const struct irq_domain_ops regmap_domain_ops = {
};
/**
+ * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
+ * @data: Data for the &struct regmap_irq_chip
+ * @base: Base register
+ * @index: Register index
+ *
+ * Returns the register address corresponding to the given @base and @index
+ * by the formula ``base + index * map->reg_stride * data->irq_reg_stride``.
+ */
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index)
+{
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+
+ /*
+ * FIXME: This is for backward compatibility and should be removed
+ * when not_fixed_stride is dropped (it's only used by qcom-pm8008).
+ */
+ if (chip->not_fixed_stride && chip->sub_reg_offsets) {
+ struct regmap_irq_sub_irq_map *subreg;
+
+ subreg = &chip->sub_reg_offsets[0];
+ return base + subreg->offset[0];
+ }
+
+ return base + index * map->reg_stride * data->irq_reg_stride;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
+
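[Editor's note: chips whose register banks are not evenly strided can now supply their own mapping instead of not_fixed_stride; a sketch with an illustrative 0x10 spacing:]

	static unsigned int foo_get_irq_reg(struct regmap_irq_chip_data *data,
					    unsigned int base, int index)
	{
		return base + index * 0x10; /* banks 0x10 apart */
	}

	static const struct regmap_irq_chip foo_irq_chip = {
		/* ... */
		.get_irq_reg = foo_get_irq_reg,
	};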
+/**
+ * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
+ * @buf: Buffer containing configuration register values; this is a 2D array of
+ * `num_config_bases` rows, each of `num_config_regs` elements.
+ * @type: The requested IRQ type.
+ * @irq_data: The IRQ being configured.
+ * @idx: Index of the IRQ's config registers within each array `buf[i]`.
+ *
+ * This is a &struct regmap_irq_chip->set_type_config callback suitable for
+ * chips with one config register. Register values are updated according to
+ * the &struct regmap_irq_type data associated with an IRQ.
+ */
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx)
+{
+ const struct regmap_irq_type *t = &irq_data->type;
+
+ if (t->type_reg_mask)
+ buf[0][idx] &= ~t->type_reg_mask;
+ else
+ buf[0][idx] &= ~(t->type_falling_val |
+ t->type_rising_val |
+ t->type_level_low_val |
+ t->type_level_high_val);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ buf[0][idx] |= t->type_falling_val;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ buf[0][idx] |= t->type_rising_val;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ buf[0][idx] |= (t->type_falling_val |
+ t->type_rising_val);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ buf[0][idx] |= t->type_level_high_val;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ buf[0][idx] |= t->type_level_low_val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
+
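[Editor's note: how a chip description might adopt the new config-register scheme; register addresses and counts are illustrative:]

	static const unsigned int foo_config_base[] = { 0x40 };

	static const struct regmap_irq_chip foo_irq_chip = {
		.name = "foo",
		/* ... irqs, num_regs, status/mask bases ... */
		.config_base = foo_config_base,
		.num_config_bases = ARRAY_SIZE(foo_config_base),
		.num_config_regs = 8,
		.set_type_config = regmap_irq_set_type_config_simple,
	};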
+/**
* regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
* @fwnode: The firmware node where the IRQ domain should be added to.
@@ -636,7 +723,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
int ret = -ENOMEM;
int num_type_reg;
u32 reg;
- u32 unmask_offset;
if (chip->num_regs <= 0)
return -EINVAL;
@@ -653,11 +739,19 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
if (chip->not_fixed_stride) {
+ dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead");
+
for (i = 0; i < chip->num_regs; i++)
if (chip->sub_reg_offsets[i].num_regs != 1)
return -EINVAL;
}
+ if (chip->num_type_reg)
+ dev_warn(map->dev, "type registers are deprecated; use config registers instead");
+
+ if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt)
+ dev_warn(map->dev, "virtual registers are deprecated; use config registers instead");
+
if (irq_base) {
irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
if (irq_base < 0) {
@@ -673,30 +767,30 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->num_main_regs) {
d->main_status_buf = kcalloc(chip->num_main_regs,
- sizeof(unsigned int),
+ sizeof(*d->main_status_buf),
GFP_KERNEL);
if (!d->main_status_buf)
goto err_alloc;
}
- d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
GFP_KERNEL);
if (!d->status_buf)
goto err_alloc;
- d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
goto err_alloc;
- d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
GFP_KERNEL);
if (!d->mask_buf_def)
goto err_alloc;
if (chip->wake_base) {
- d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
GFP_KERNEL);
if (!d->wake_buf)
goto err_alloc;
@@ -705,11 +799,11 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
if (num_type_reg) {
d->type_buf_def = kcalloc(num_type_reg,
- sizeof(unsigned int), GFP_KERNEL);
+ sizeof(*d->type_buf_def), GFP_KERNEL);
if (!d->type_buf_def)
goto err_alloc;
- d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
+ d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf),
GFP_KERNEL);
if (!d->type_buf)
goto err_alloc;
@@ -726,13 +820,31 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
for (i = 0; i < chip->num_virt_regs; i++) {
d->virt_buf[i] = kcalloc(chip->num_regs,
- sizeof(unsigned int),
+ sizeof(**d->virt_buf),
GFP_KERNEL);
if (!d->virt_buf[i])
goto err_alloc;
}
}
+ if (chip->num_config_bases && chip->num_config_regs) {
+ /*
+ * Create config_buf[num_config_bases][num_config_regs]
+ */
+ d->config_buf = kcalloc(chip->num_config_bases,
+ sizeof(*d->config_buf), GFP_KERNEL);
+ if (!d->config_buf)
+ goto err_alloc;
+
+ for (i = 0; i < chip->num_config_regs; i++) {
+ d->config_buf[i] = kcalloc(chip->num_config_regs,
+ sizeof(**d->config_buf),
+ GFP_KERNEL);
+ if (!d->config_buf[i])
+ goto err_alloc;
+ }
+ }
+
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
d->irq = irq;
@@ -740,18 +852,53 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->chip = chip;
d->irq_base = irq_base;
+ if (chip->mask_base && chip->unmask_base &&
+ !chip->mask_unmask_non_inverted) {
+ /*
+ * Chips that specify both mask_base and unmask_base used to
+ * get inverted mask behavior by default, with no way to ask
+ * for the normal, non-inverted behavior. This "inverted by
+ * default" behavior is deprecated, but we have to support it
+ * until existing drivers have been fixed.
+ *
+ * Existing drivers should be updated by swapping mask_base
+ * and unmask_base and setting mask_unmask_non_inverted=true.
+ * New drivers should always set the flag.
+ */
+ dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");
+
+ /* Might as well warn about mask_invert while we're at it... */
+ if (chip->mask_invert)
+ dev_warn(map->dev, "mask_invert=true ignored");
+
+ d->mask_base = chip->unmask_base;
+ d->unmask_base = chip->mask_base;
+ } else if (chip->mask_invert) {
+ /*
+ * Swap the roles of mask_base and unmask_base if the bits are
+ * inverted. This is deprecated, drivers should use unmask_base
+ * directly.
+ */
+ dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base");
+
+ d->mask_base = chip->unmask_base;
+ d->unmask_base = chip->mask_base;
+ } else {
+ d->mask_base = chip->mask_base;
+ d->unmask_base = chip->unmask_base;
+ }
+
if (chip->irq_reg_stride)
d->irq_reg_stride = chip->irq_reg_stride;
else
d->irq_reg_stride = 1;
- if (chip->type_reg_stride)
- d->type_reg_stride = chip->type_reg_stride;
+ if (chip->get_irq_reg)
+ d->get_irq_reg = chip->get_irq_reg;
else
- d->type_reg_stride = 1;
+ d->get_irq_reg = regmap_irq_get_irq_reg_linear;
- if (!map->use_single_read && map->reg_stride == 1 &&
- d->irq_reg_stride == 1) {
+ if (regmap_irq_can_bulk_read_status(d)) {
d->status_reg_buf = kmalloc_array(chip->num_regs,
map->format.val_bytes,
GFP_KERNEL);
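[Editor's note: per the deprecation warning in this hunk, an existing driver that relied on the inverted default would be updated roughly like this; the register names are illustrative:]

	/* before: both bases set, implicitly inverted */
	.mask_base   = FOO_REG_A,
	.unmask_base = FOO_REG_B,

	/* after: swap the two and state the non-inverted semantics explicitly */
	.mask_base   = FOO_REG_B,
	.unmask_base = FOO_REG_A,
	.mask_unmask_non_inverted = true,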
@@ -768,35 +915,34 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
- if (!chip->mask_base)
- continue;
- reg = sub_irq_reg(d, d->chip->mask_base, i);
+ if (d->mask_base) {
+ reg = d->get_irq_reg(d, d->mask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
- if (chip->mask_invert)
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf[i], ~d->mask_buf[i]);
- else if (d->chip->unmask_base) {
- unmask_offset = d->chip->unmask_base -
- d->chip->mask_base;
- ret = regmap_irq_update_bits(d,
- reg + unmask_offset,
- d->mask_buf[i],
- d->mask_buf[i]);
- } else
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf[i], d->mask_buf[i]);
- if (ret != 0) {
- dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
- reg, ret);
- goto err_alloc;
+ if (d->unmask_base) {
+ reg = d->get_irq_reg(d, d->unmask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], ~d->mask_buf[i]);
+ if (ret) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
}
if (!chip->init_ack_masked)
continue;
/* Ack masked but set interrupts */
- reg = sub_irq_reg(d, d->chip->status_base, i);
+ reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &d->status_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
@@ -808,7 +954,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->status_buf[i] = ~d->status_buf[i];
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
- reg = sub_irq_reg(d, d->chip->ack_base, i);
+ reg = d->get_irq_reg(d, d->chip->ack_base, i);
if (chip->ack_invert)
ret = regmap_write(map, reg,
~(d->status_buf[i] & d->mask_buf[i]));
@@ -817,13 +963,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->status_buf[i] & d->mask_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- (d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~(d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -837,14 +979,14 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (d->wake_buf) {
for (i = 0; i < chip->num_regs; i++) {
d->wake_buf[i] = d->mask_buf_def[i];
- reg = sub_irq_reg(d, d->chip->wake_base, i);
+ reg = d->get_irq_reg(d, d->chip->wake_base, i);
if (chip->wake_invert)
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
0);
else
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->wake_buf[i]);
if (ret != 0) {
@@ -857,7 +999,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
- reg = sub_irq_reg(d, d->chip->type_base, i);
+ reg = d->get_irq_reg(d, d->chip->type_base, i);
ret = regmap_read(map, reg, &d->type_buf_def[i]);
@@ -913,6 +1055,11 @@ err_alloc:
kfree(d->virt_buf[i]);
kfree(d->virt_buf);
}
+ if (d->config_buf) {
+ for (i = 0; i < chip->num_config_bases; i++)
+ kfree(d->config_buf[i]);
+ kfree(d->config_buf);
+ }
kfree(d);
return ret;
}
@@ -953,7 +1100,7 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
unsigned int virq;
- int hwirq;
+ int i, hwirq;
if (!d)
return;
@@ -983,6 +1130,11 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->mask_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
+ if (d->config_buf) {
+ for (i = 0; i < d->chip->num_config_bases; i++)
+ kfree(d->config_buf[i]);
+ kfree(d->config_buf);
+ }
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
@@ -1053,7 +1205,7 @@ int devm_regmap_add_irq_chip_fwnode(struct device *dev,
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
- * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
*
* @dev: The device pointer to which the irq_chip belongs.
* @map: The regmap for the device.
@@ -1082,7 +1234,7 @@ EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
* devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
*
- * @dev: Device for which which resource was allocated.
+ * @dev: Device for which the resource was allocated.
* @irq: Primary IRQ for the device.
* @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
*
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 71f16be7e717..3ccdd86a97e7 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -10,13 +10,14 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/swab.h>
#include "internal.h"
struct regmap_mmio_context {
void __iomem *regs;
unsigned int val_bytes;
- bool relaxed_mmio;
+ bool big_endian;
bool attached_clk;
struct clk *clk;
@@ -33,9 +34,6 @@ static int regmap_mmio_regbits_check(size_t reg_bits)
case 8:
case 16:
case 32:
-#ifdef CONFIG_64BIT
- case 64:
-#endif
return 0;
default:
return -EINVAL;
@@ -50,18 +48,13 @@ static int regmap_mmio_get_min_stride(size_t val_bits)
case 8:
/* The core treats 0 as 1 */
min_stride = 0;
- return 0;
+ break;
case 16:
min_stride = 2;
break;
case 32:
min_stride = 4;
break;
-#ifdef CONFIG_64BIT
- case 64:
- min_stride = 8;
- break;
-#endif
default:
return -EINVAL;
}
@@ -83,6 +76,12 @@ static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
writeb_relaxed(val, ctx->regs + reg);
}
+static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite8(val, ctx->regs + reg);
+}
+
static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
@@ -97,10 +96,22 @@ static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
writew_relaxed(val, ctx->regs + reg);
}
+static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite16(val, ctx->regs + reg);
+}
+
static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
+ writew(swab16(val), ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
iowrite16be(val, ctx->regs + reg);
}
@@ -118,28 +129,24 @@ static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
writel_relaxed(val, ctx->regs + reg);
}
-static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
- unsigned int reg,
- unsigned int val)
+static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
{
- iowrite32be(val, ctx->regs + reg);
+ iowrite32(val, ctx->regs + reg);
}
-#ifdef CONFIG_64BIT
-static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
- writeq(val, ctx->regs + reg);
+ writel(swab32(val), ctx->regs + reg);
}
-static void regmap_mmio_write64le_relaxed(struct regmap_mmio_context *ctx,
- unsigned int reg,
- unsigned int val)
+static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
{
- writeq_relaxed(val, ctx->regs + reg);
+ iowrite32be(val, ctx->regs + reg);
}
-#endif
static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
@@ -160,6 +167,83 @@ static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
return 0;
}
+static int regmap_mmio_noinc_write(void *context, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+ int i;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * There are no native, assembly-optimized big-endian write
+ * operations, so fall back to swapping the values by hand if this
+ * is needed. (Single bytes are fine, they are not affected by
+ * endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ {
+ const u16 *valp = (const u16 *)val;
+ for (i = 0; i < val_count; i++)
+ writew(swab16(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+ case 4:
+ {
+ const u32 *valp = (const u32 *)val;
+ for (i = 0; i < val_count; i++)
+ writel(swab32(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#ifdef CONFIG_64BIT
+ case 8:
+ {
+ const u64 *valp = (const u64 *)val;
+ for (i = 0; i < val_count; i++)
+ writeq(swab64(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ writesb(ctx->regs + reg, (const u8 *)val, val_count);
+ break;
+ case 2:
+ writesw(ctx->regs + reg, (const u16 *)val, val_count);
+ break;
+ case 4:
+ writesl(ctx->regs + reg, (const u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ writesq(ctx->regs + reg, (const u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
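[Editor's note: the new bus callbacks back regmap_noinc_write()/regmap_noinc_read() for FIFO-style registers; a usage sketch, assuming the config's writeable_noinc_reg() callback marks FOO_FIFO:]

	/* Push a buffer into a FIFO register without incrementing the address. */
	ret = regmap_noinc_write(map, FOO_FIFO, buf, sizeof(buf));
	if (ret)
		return ret;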
static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
unsigned int reg)
{
@@ -172,6 +256,12 @@ static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
return readb_relaxed(ctx->regs + reg);
}
+static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread8(ctx->regs + reg);
+}
+
static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
unsigned int reg)
{
@@ -184,9 +274,21 @@ static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx
return readw_relaxed(ctx->regs + reg);
}
+static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16(ctx->regs + reg);
+}
+
static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
+ return swab16(readw(ctx->regs + reg));
+}
+
+static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
return ioread16be(ctx->regs + reg);
}
@@ -202,25 +304,23 @@ static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx
return readl_relaxed(ctx->regs + reg);
}
-static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
- unsigned int reg)
+static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- return ioread32be(ctx->regs + reg);
+ return ioread32(ctx->regs + reg);
}
-#ifdef CONFIG_64BIT
-static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
- return readq(ctx->regs + reg);
+ return swab32(readl(ctx->regs + reg));
}
-static unsigned int regmap_mmio_read64le_relaxed(struct regmap_mmio_context *ctx,
- unsigned int reg)
+static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- return readq_relaxed(ctx->regs + reg);
+ return ioread32be(ctx->regs + reg);
}
-#endif
static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
@@ -241,6 +341,71 @@ static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
return 0;
}
+static int regmap_mmio_noinc_read(void *context, unsigned int reg,
+ void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ readsb(ctx->regs + reg, (u8 *)val, val_count);
+ break;
+ case 2:
+ readsw(ctx->regs + reg, (u16 *)val, val_count);
+ break;
+ case 4:
+ readsl(ctx->regs + reg, (u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ readsq(ctx->regs + reg, (u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+
+ /*
+ * There are no native, assembly-optimized big-endian read
+ * operations, so swap the values in place after reading if this
+ * is needed. (Single bytes are fine, they are not affected by
+ * endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ swab16_array(val, val_count);
+ break;
+ case 4:
+ swab32_array(val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ swab64_array(val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
static void regmap_mmio_free_context(void *context)
{
struct regmap_mmio_context *ctx = context;
@@ -257,6 +422,8 @@ static const struct regmap_bus regmap_mmio = {
.fast_io = true,
.reg_write = regmap_mmio_write,
.reg_read = regmap_mmio_read,
+ .reg_noinc_write = regmap_mmio_noinc_write,
+ .reg_noinc_read = regmap_mmio_noinc_read,
.free_context = regmap_mmio_free_context,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
@@ -284,13 +451,15 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
+ if (config->use_relaxed_mmio && config->io_port)
+ return ERR_PTR(-EINVAL);
+
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
- ctx->relaxed_mmio = config->use_relaxed_mmio;
ctx->clk = ERR_PTR(-ENODEV);
switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
@@ -301,7 +470,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
#endif
switch (config->val_bits) {
case 8:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read8_relaxed;
ctx->reg_write = regmap_mmio_write8_relaxed;
} else {
@@ -310,7 +482,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
}
break;
case 16:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16le;
+ ctx->reg_write = regmap_mmio_iowrite16le;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read16le_relaxed;
ctx->reg_write = regmap_mmio_write16le_relaxed;
} else {
@@ -319,7 +494,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
}
break;
case 32:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32le;
+ ctx->reg_write = regmap_mmio_iowrite32le;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read32le_relaxed;
ctx->reg_write = regmap_mmio_write32le_relaxed;
} else {
@@ -327,17 +505,6 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->reg_write = regmap_mmio_write32le;
}
break;
-#ifdef CONFIG_64BIT
- case 64:
- if (ctx->relaxed_mmio) {
- ctx->reg_read = regmap_mmio_read64le_relaxed;
- ctx->reg_write = regmap_mmio_write64le_relaxed;
- } else {
- ctx->reg_read = regmap_mmio_read64le;
- ctx->reg_write = regmap_mmio_write64le;
- }
- break;
-#endif
default:
ret = -EINVAL;
goto err_free;
@@ -347,18 +514,34 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
#ifdef __BIG_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
+ ctx->big_endian = true;
switch (config->val_bits) {
case 8:
- ctx->reg_read = regmap_mmio_read8;
- ctx->reg_write = regmap_mmio_write8;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else {
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ }
break;
case 16:
- ctx->reg_read = regmap_mmio_read16be;
- ctx->reg_write = regmap_mmio_write16be;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16be;
+ ctx->reg_write = regmap_mmio_iowrite16be;
+ } else {
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ }
break;
case 32:
- ctx->reg_read = regmap_mmio_read32be;
- ctx->reg_write = regmap_mmio_write32be;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32be;
+ ctx->reg_write = regmap_mmio_iowrite32be;
+ } else {
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ }
break;
default:
ret = -EINVAL;
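
The regmap-mmio changes above pair each value width with ioread*()/iowrite*() accessors when config->io_port is set, and the gen_context check rejects combining io_port with use_relaxed_mmio. A minimal sketch of how a driver might opt in, assuming a hypothetical device; devm_regmap_init_mmio() and the regmap_config fields are the real API, while the names and register layout here are made up:

#include <linux/err.h>
#include <linux/regmap.h>

static const struct regmap_config example_port_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
	.io_port  = true,	/* route accesses through ioread8()/iowrite8() */
};

static int example_probe(struct device *dev, void __iomem *base)
{
	struct regmap *map;

	/* base would come from devm_ioport_map() or similar */
	map = devm_regmap_init_mmio(dev, base, &example_port_cfg);
	return PTR_ERR_OR_ZERO(map);
}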
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
index 597042e2d009..986af26d88c2 100644
--- a/drivers/base/regmap/regmap-sccb.c
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -80,7 +80,7 @@ static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
return i2c_smbus_write_byte_data(i2c, reg, val);
}
-static struct regmap_bus regmap_sccb_bus = {
+static const struct regmap_bus regmap_sccb_bus = {
.reg_write = regmap_sccb_write,
.reg_read = regmap_sccb_read,
};
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index fe3ac26b66ad..388c3a087bd9 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -42,7 +42,7 @@ static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *va
return 0;
}
-static struct regmap_bus regmap_sdw_mbq = {
+static const struct regmap_bus regmap_sdw_mbq = {
.reg_read = regmap_sdw_mbq_read,
.reg_write = regmap_sdw_mbq_write,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 966de8a136d9..81b0327f719d 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -30,7 +30,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
return 0;
}
-static struct regmap_bus regmap_sdw = {
+static const struct regmap_bus regmap_sdw = {
.reg_read = regmap_sdw_read,
.reg_write = regmap_sdw_write,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 0968059f1ef5..8075db788b39 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -22,7 +22,7 @@ static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
return slim_read(sdev, *(u16 *)reg, val_size, val);
}
-static struct regmap_bus regmap_slimbus_bus = {
+static const struct regmap_bus regmap_slimbus_bus = {
.write = regmap_slimbus_write,
.read = regmap_slimbus_read,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
index ad1da83e849f..4c2b94b3e30b 100644
--- a/drivers/base/regmap/regmap-spi-avmm.c
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <linux/swab.h>
/*
* This driver implements the regmap operations for a generic SPI
@@ -162,19 +163,12 @@ struct spi_avmm_bridge {
/* bridge buffer used in translation between protocol layers */
char trans_buf[TRANS_BUF_SIZE];
char phy_buf[PHY_BUF_SIZE];
- void (*swap_words)(char *buf, unsigned int len);
+ void (*swap_words)(void *buf, unsigned int len);
};
-static void br_swap_words_32(char *buf, unsigned int len)
+static void br_swap_words_32(void *buf, unsigned int len)
{
- u32 *p = (u32 *)buf;
- unsigned int count;
-
- count = len / 4;
- while (count--) {
- *p = swab32p(p);
- p++;
- }
+ swab32_array(buf, len / 4);
}
/*
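
br_swap_words_32() now delegates to the generic helper from <linux/swab.h> instead of open-coding the loop. A quick sketch of the helper's effect, with made-up data:

#include <linux/kernel.h>
#include <linux/swab.h>

static void example_swap(void)
{
	u32 words[2] = { 0x11223344, 0xaabbccdd };

	/* Byte-swap every 32-bit word in place: 0x11223344 -> 0x44332211 */
	swab32_array(words, ARRAY_SIZE(words));
}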
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 719323bc6c7f..37ab23a9d034 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
const struct regmap_config *config)
{
size_t max_size = spi_max_transfer_size(spi);
+ size_t max_msg_size, reg_reserve_size;
struct regmap_bus *bus;
if (max_size != SIZE_MAX) {
@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
if (!bus)
return ERR_PTR(-ENOMEM);
+ max_msg_size = spi_max_message_size(spi);
+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ + config->pad_bits / BITS_PER_BYTE;
+ if (max_size + reg_reserve_size > max_msg_size)
+ max_size -= reg_reserve_size;
+
bus->free_on_exit = true;
bus->max_raw_read = max_size;
bus->max_raw_write = max_size;
+
return bus;
}
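
The regmap-spi hunk above shrinks the raw transfer limit when the formatted register address and padding would push a message past the controller's message-size limit. An illustrative stand-alone helper mirroring that arithmetic (not a kernel API; the name is hypothetical):

#include <linux/bits.h>	/* BITS_PER_BYTE */

static size_t example_raw_limit(size_t max_xfer, size_t max_msg,
				unsigned int reg_bits, unsigned int pad_bits)
{
	size_t reserve = reg_bits / BITS_PER_BYTE + pad_bits / BITS_PER_BYTE;

	/* Leave headroom for the register address and padding bytes */
	if (max_xfer + reserve > max_msg)
		max_xfer -= reserve;

	return max_xfer;
}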
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 1fbaaad71ca5..3a8b402db852 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -172,17 +172,17 @@ static int w1_reg_a16_v16_write(void *context, unsigned int reg,
* Various types of supported bus addressing
*/
-static struct regmap_bus regmap_w1_bus_a8_v8 = {
+static const struct regmap_bus regmap_w1_bus_a8_v8 = {
.reg_read = w1_reg_a8_v8_read,
.reg_write = w1_reg_a8_v8_write,
};
-static struct regmap_bus regmap_w1_bus_a8_v16 = {
+static const struct regmap_bus regmap_w1_bus_a8_v16 = {
.reg_read = w1_reg_a8_v16_read,
.reg_write = w1_reg_a8_v16_write,
};
-static struct regmap_bus regmap_w1_bus_a16_v16 = {
+static const struct regmap_bus regmap_w1_bus_a16_v16 = {
.reg_read = w1_reg_a16_v16_read,
.reg_write = w1_reg_a16_v16_write,
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8f9fe5fd4707..c6d6d53e8cd3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -288,15 +288,9 @@ static void regmap_format_16_native(void *buf, unsigned int val,
memcpy(buf, &v, sizeof(v));
}
-static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
- u8 *b = buf;
-
- val <<= shift;
-
- b[0] = val >> 16;
- b[1] = val >> 8;
- b[2] = val;
+ put_unaligned_be24(val << shift, buf);
}
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
@@ -380,14 +374,9 @@ static unsigned int regmap_parse_16_native(const void *buf)
return v;
}
-static unsigned int regmap_parse_24(const void *buf)
+static unsigned int regmap_parse_24_be(const void *buf)
{
- const u8 *b = buf;
- unsigned int ret = b[2];
- ret |= ((unsigned int)b[1]) << 8;
- ret |= ((unsigned int)b[0]) << 16;
-
- return ret;
+ return get_unaligned_be24(buf);
}
static unsigned int regmap_parse_32_be(const void *buf)
@@ -821,8 +810,11 @@ struct regmap *__regmap_init(struct device *dev,
else
map->alloc_flags = GFP_KERNEL;
+ map->reg_base = config->reg_base;
+
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
+ map->format.reg_downshift = config->reg_downshift;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
config->val_bits + config->pad_bits, 8);
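
reg_base and reg_downshift let a map translate driver-visible register numbers before they hit the wire; the later hunks apply them just before format_reg() in the raw write, formatted write, multi-write and raw read paths. A hedged example config using both, for a hypothetical device whose registers are byte-addressed in the datasheet but word-addressed on the bus:

static const struct regmap_config example_shifted_cfg = {
	.reg_bits      = 16,
	.val_bits      = 32,
	.reg_stride    = 4,
	.reg_base      = 0x1000, /* added to every register number */
	.reg_downshift = 2,	 /* byte offsets -> word addresses */
};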
@@ -835,12 +827,15 @@ struct regmap *__regmap_init(struct device *dev,
map->reg_stride_order = ilog2(map->reg_stride);
else
map->reg_stride_order = -1;
- map->use_single_read = config->use_single_read || !bus || !bus->read;
- map->use_single_write = config->use_single_write || !bus || !bus->write;
- map->can_multi_write = config->can_multi_write && bus && bus->write;
+ map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
+ map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
+ map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
if (bus) {
map->max_raw_read = bus->max_raw_read;
map->max_raw_write = bus->max_raw_write;
+ } else if (config->max_raw_read && config->max_raw_write) {
+ map->max_raw_read = config->max_raw_read;
+ map->max_raw_write = config->max_raw_write;
}
map->dev = dev;
map->bus = bus;
@@ -874,7 +869,18 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (!bus) {
+ if (config && config->read && config->write) {
+ map->reg_read = _regmap_bus_read;
+ if (config->reg_update_bits)
+ map->reg_update_bits = config->reg_update_bits;
+
+ /* Bulk read/write */
+ map->read = config->read;
+ map->write = config->write;
+
+ reg_endian = REGMAP_ENDIAN_NATIVE;
+ val_endian = REGMAP_ENDIAN_NATIVE;
+ } else if (!bus) {
map->reg_read = config->reg_read;
map->reg_write = config->reg_write;
map->reg_update_bits = config->reg_update_bits;
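
With this hunk a map can be created with no regmap_bus at all yet still support raw and bulk I/O, by supplying bulk callbacks in the config; both callbacks (and, per the earlier hunk, both raw limits) must be provided together. A minimal sketch assuming a hypothetical transport, with signatures following the regmap_config read/write members:

static int example_read(void *context, const void *reg_buf, size_t reg_size,
			void *val_buf, size_t val_size)
{
	/* Issue one transaction: send reg_buf, read val_size bytes back */
	return 0;
}

static int example_write(void *context, const void *data, size_t count)
{
	/* data holds the formatted register, padding and values */
	return 0;
}

static const struct regmap_config example_bulk_cfg = {
	.reg_bits      = 8,
	.val_bits      = 8,
	.read	       = example_read,
	.write	       = example_write,
	.max_raw_read  = 256,	/* picked up now that no bus is present */
	.max_raw_write = 256,
};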
@@ -891,10 +897,13 @@ struct regmap *__regmap_init(struct device *dev,
} else {
map->reg_read = _regmap_bus_read;
map->reg_update_bits = bus->reg_update_bits;
- }
+ /* Bulk read/write */
+ map->read = bus->read;
+ map->write = bus->write;
- reg_endian = regmap_get_reg_endian(bus, config);
- val_endian = regmap_get_val_endian(dev, bus, config);
+ reg_endian = regmap_get_reg_endian(bus, config);
+ val_endian = regmap_get_val_endian(dev, bus, config);
+ }
switch (config->reg_bits + map->reg_shift) {
case 2:
@@ -971,9 +980,13 @@ struct regmap *__regmap_init(struct device *dev,
break;
case 24:
- if (reg_endian != REGMAP_ENDIAN_BIG)
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_24_be;
+ break;
+ default:
goto err_hwlock;
- map->format.format_reg = regmap_format_24;
+ }
break;
case 32:
@@ -1044,10 +1057,14 @@ struct regmap *__regmap_init(struct device *dev,
}
break;
case 24:
- if (val_endian != REGMAP_ENDIAN_BIG)
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_24_be;
+ map->format.parse_val = regmap_parse_24_be;
+ break;
+ default:
goto err_hwlock;
- map->format.format_val = regmap_format_24;
- map->format.parse_val = regmap_parse_24;
+ }
break;
case 32:
switch (val_endian) {
@@ -1280,6 +1297,9 @@ static void regmap_field_init(struct regmap_field *rm_field,
rm_field->reg = reg_field.reg;
rm_field->shift = reg_field.lsb;
rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
+
+ WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");
+
rm_field->id_size = reg_field.id_size;
rm_field->id_offset = reg_field.id_offset;
}
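
The new WARN_ONCE() fires when a reg_field is defined with msb below lsb, which yields an empty GENMASK(). For reference, a well-formed field definition (register and bit positions are hypothetical):

#include <linux/regmap.h>

/* Bits 7..4 of register 0x10: msb (7) >= lsb (4), so the mask is non-empty */
static const struct reg_field example_field = REG_FIELD(0x10, 4, 7);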
@@ -1668,8 +1688,6 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
size_t len;
int i;
- WARN_ON(!map->bus);
-
/* Check for unwritable or noinc registers in range
* before we start
*/
@@ -1735,6 +1753,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->write_flag_mask);
@@ -1749,7 +1769,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
val = work_val;
}
- if (map->async && map->bus->async_write) {
+ if (map->async && map->bus && map->bus->async_write) {
struct regmap_async *async;
trace_regmap_async_write_start(map, reg, val_len);
@@ -1817,11 +1837,11 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
* write.
*/
if (val == work_val)
- ret = map->bus->write(map->bus_context, map->work_buf,
- map->format.reg_bytes +
- map->format.pad_bytes +
- val_len);
- else if (map->bus->gather_write)
+ ret = map->write(map->bus_context, map->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len);
+ else if (map->bus && map->bus->gather_write)
ret = map->bus->gather_write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes,
@@ -1839,7 +1859,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
memcpy(buf, map->work_buf, map->format.reg_bytes);
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
- ret = map->bus->write(map->bus_context, buf, len);
+ ret = map->write(map->bus_context, buf, len);
kfree(buf);
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
@@ -1862,8 +1882,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
*/
bool regmap_can_raw_write(struct regmap *map)
{
- return map->bus && map->bus->write && map->format.format_val &&
- map->format.format_reg;
+ return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
@@ -1896,7 +1915,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
struct regmap_range_node *range;
struct regmap *map = context;
- WARN_ON(!map->bus || !map->format.format_write);
+ WARN_ON(!map->format.format_write);
range = _regmap_range_lookup(map, reg);
if (range) {
@@ -1905,12 +1924,13 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map, reg, 1);
- ret = map->bus->write(map->bus_context, map->work_buf,
- map->format.buf_size);
+ ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
trace_regmap_hw_write_done(map, reg, 1);
@@ -1930,7 +1950,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
{
struct regmap *map = context;
- WARN_ON(!map->bus || !map->format.format_val);
+ WARN_ON(!map->format.format_val);
map->format.format_val(map->work_buf + map->format.reg_bytes
+ map->format.pad_bytes, val, 0);
@@ -1944,7 +1964,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
static inline void *_regmap_map_get_context(struct regmap *map)
{
- return (map->bus) ? map : map->bus_context;
+ return (map->bus || map->read) ? map : map->bus_context;
}
int _regmap_write(struct regmap *map, unsigned int reg,
@@ -2109,6 +2129,99 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
+static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
+ void *val, unsigned int val_len, bool write)
+{
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ unsigned int lastval;
+ u8 *u8p;
+ u16 *u16p;
+ u32 *u32p;
+#ifdef CONFIG_64BIT
+ u64 *u64p;
+#endif
+ int ret;
+ int i;
+
+ switch (val_bytes) {
+ case 1:
+ u8p = val;
+ if (write)
+ lastval = (unsigned int)u8p[val_count - 1];
+ break;
+ case 2:
+ u16p = val;
+ if (write)
+ lastval = (unsigned int)u16p[val_count - 1];
+ break;
+ case 4:
+ u32p = val;
+ if (write)
+ lastval = (unsigned int)u32p[val_count - 1];
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ u64p = val;
+ if (write)
+ lastval = (unsigned int)u64p[val_count - 1];
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Update the cache with the last value we write; the rest has
+ * gone straight into the hardware FIFO, and FIFOs cannot be
+ * cached. This ensures a single read from the cache still works.
+ */
+ if (write) {
+ if (!map->cache_bypass && !map->defer_caching) {
+ ret = regcache_write(map, reg, lastval);
+ if (ret != 0)
+ return ret;
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+ ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
+ } else {
+ ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
+ }
+
+ if (!ret && regmap_should_log(map)) {
+ dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
+ for (i = 0; i < val_count; i++) {
+ switch (val_bytes) {
+ case 1:
+ pr_cont("%x", u8p[i]);
+ break;
+ case 2:
+ pr_cont("%x", u16p[i]);
+ break;
+ case 4:
+ pr_cont("%x", u32p[i]);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ pr_cont("%llx", u64p[i]);
+ break;
+#endif
+ default:
+ break;
+ }
+ if (i == (val_count - 1))
+ pr_cont("]\n");
+ else
+ pr_cont(",");
+ }
+ }
+
+ return ret;
+}
+
/**
* regmap_noinc_write(): Write data from a register without incrementing the
* register number
@@ -2136,10 +2249,8 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
size_t write_len;
int ret;
- if (!map->bus)
+ if (!map->write && !(map->bus && map->bus->reg_noinc_write))
return -EINVAL;
- if (!map->bus->write)
- return -ENOTSUPP;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2154,6 +2265,15 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
goto out_unlock;
}
+ /*
+ * Use the accelerated operation if we can. The cast drops the
+ * const qualifier from val so the buffer handling can be shared
+ * with the read path in regmap_noinc_readwrite().
+ */
+ if (map->bus && map->bus->reg_noinc_write) {
+ ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
+ goto out_unlock;
+ }
+
while (val_len) {
if (map->max_raw_write && map->max_raw_write < val_len)
write_len = map->max_raw_write;
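
Callers do not change: regmap_noinc_write() now simply prefers the bus's reg_noinc_write() when one exists and otherwise falls back to the chunked raw path. A hedged usage sketch with a made-up FIFO register:

#define EXAMPLE_TX_FIFO	0x40	/* hypothetical write-only FIFO register */

static int example_fill_fifo(struct regmap *map, const u8 *buf, size_t len)
{
	/* Every byte lands in the same register; nothing increments */
	return regmap_noinc_write(map, EXAMPLE_TX_FIFO, buf, len);
}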
@@ -2202,6 +2322,28 @@ int regmap_field_update_bits_base(struct regmap_field *field,
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
/**
+ * regmap_field_test_bits() - Check if all specified bits are set in a
+ * register field.
+ *
+ * @field: Register field to operate on
+ * @bits: Bits to test
+ *
+ * Returns a negative error code if the underlying regmap_field_read()
+ * fails, 0 if at least one of the tested bits is not set, and 1 if all
+ * tested bits are set.
+ */
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_field_read(field, &val);
+ if (ret)
+ return ret;
+
+ return (val & bits) == bits;
+}
+EXPORT_SYMBOL_GPL(regmap_field_test_bits);
+
+/**
* regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
* register field with port ID
*
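
A short usage sketch for the new helper, with a hypothetical field and bits; note the three-way result:

static int example_wait_ready(struct regmap_field *status)
{
	int ret = regmap_field_test_bits(status, BIT(0) | BIT(1));

	if (ret < 0)		/* underlying read failed */
		return ret;

	return ret ? 0 : -EBUSY; /* 1: all bits set, 0: at least one clear */
}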
@@ -2259,7 +2401,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
* Some devices don't support bulk write, for them we have a series of
* single write operations.
*/
- if (!map->bus || !map->format.parse_inplace) {
+ if (!map->write || !map->format.parse_inplace) {
map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -2306,6 +2448,10 @@ out:
kfree(wval);
}
+
+ if (!ret)
+ trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
@@ -2346,6 +2492,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
unsigned int reg = regs[i].reg;
unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@@ -2354,7 +2502,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
u8 = buf;
*u8 |= map->write_flag_mask;
- ret = map->bus->write(map->bus_context, buf, len);
+ ret = map->write(map->bus_context, buf, len);
kfree(buf);
@@ -2660,9 +2808,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
struct regmap_range_node *range;
int ret;
- WARN_ON(!map->bus);
-
- if (!map->bus || !map->bus->read)
+ if (!map->read)
return -EINVAL;
range = _regmap_range_lookup(map, reg);
@@ -2673,14 +2819,16 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->read_flag_mask);
trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
- ret = map->bus->read(map->bus_context, map->work_buf,
- map->format.reg_bytes + map->format.pad_bytes,
- val, val_len);
+ ret = map->read(map->bus_context, map->work_buf,
+ map->format.reg_bytes + map->format.pad_bytes,
+ val, val_len);
trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
@@ -2791,8 +2939,6 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
- if (!map->bus)
- return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2807,7 +2953,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
- if (!map->bus->read) {
+ if (!map->read) {
ret = -ENOTSUPP;
goto out;
}
@@ -2867,7 +3013,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_read);
* @val: Pointer to data buffer
* @val_len: Length of output buffer in bytes.
*
- * The regmap API usually assumes that bulk bus read operations will read a
+ * The regmap API usually assumes that bulk read operations will read a
* range of registers. Some devices have certain registers for which a read
* operation will read from an internal FIFO.
*
@@ -2885,10 +3031,9 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
size_t read_len;
int ret;
- if (!map->bus)
- return -EINVAL;
- if (!map->bus->read)
+ if (!map->read)
return -ENOTSUPP;
+
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2903,6 +3048,22 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
goto out_unlock;
}
+ /* Use the accelerated operation if we can */
+ if (map->bus && map->bus->reg_noinc_read) {
+ /*
+ * We have not defined the FIFO semantics for the cache, as the
+ * cache is just one value deep: should a read return the last
+ * written value? Avoid the question by always reading the FIFO,
+ * even when a cache is in use. Cache-only mode cannot work here.
+ */
+ if (map->cache_only) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
+ goto out_unlock;
+ }
+
while (val_len) {
if (map->max_raw_read && map->max_raw_read < val_len)
read_len = map->max_raw_read;
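
The read side mirrors the write side: prefer the accelerated reg_noinc_read(), refuse cache-only mode, otherwise chunk through the raw path. A hedged sketch draining a hypothetical RX FIFO:

#define EXAMPLE_RX_FIFO	0x44	/* hypothetical read-only FIFO register */

static int example_drain_fifo(struct regmap *map, u8 *buf, size_t len)
{
	/* Reads len bytes from the same register address */
	return regmap_noinc_read(map, EXAMPLE_RX_FIFO, buf, len);
}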
@@ -3002,7 +3163,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (val_count == 0)
return -EINVAL;
- if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
+ if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
return ret;
@@ -3052,6 +3213,9 @@ out:
map->unlock(map->lock_arg);
}
+ if (!ret)
+ trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
index 9abee14df9ee..704e106e5dbd 100644
--- a/drivers/base/regmap/trace.h
+++ b/drivers/base/regmap/trace.h
@@ -32,9 +32,7 @@ DECLARE_EVENT_CLASS(regmap_reg,
__entry->val = val;
),
- TP_printk("%s reg=%x val=%x", __get_str(name),
- (unsigned int)__entry->reg,
- (unsigned int)__entry->val)
+ TP_printk("%s reg=%x val=%x", __get_str(name), __entry->reg, __entry->val)
);
DEFINE_EVENT(regmap_reg, regmap_reg_write,
@@ -43,7 +41,6 @@ DEFINE_EVENT(regmap_reg, regmap_reg_write,
unsigned int val),
TP_ARGS(map, reg, val)
-
);
DEFINE_EVENT(regmap_reg, regmap_reg_read,
@@ -52,7 +49,6 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read,
unsigned int val),
TP_ARGS(map, reg, val)
-
);
DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
@@ -61,7 +57,47 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
unsigned int val),
TP_ARGS(map, reg, val)
+);
+
+DECLARE_EVENT_CLASS(regmap_bulk,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len),
+
+ TP_STRUCT__entry(
+ __string(name, regmap_name(map))
+ __field(unsigned int, reg)
+ __dynamic_array(char, buf, val_len)
+ __field(int, val_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->reg = reg;
+ __entry->val_len = val_len;
+ memcpy(__get_dynamic_array(buf), val, val_len);
+ ),
+ TP_printk("%s reg=%x val=%s", __get_str(name), __entry->reg,
+ __print_hex(__get_dynamic_array(buf), __entry->val_len))
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_write,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_read,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
);
DECLARE_EVENT_CLASS(regmap_block,
@@ -82,9 +118,7 @@ DECLARE_EVENT_CLASS(regmap_block,
__entry->count = count;
),
- TP_printk("%s reg=%x count=%d", __get_str(name),
- (unsigned int)__entry->reg,
- (int)__entry->count)
+ TP_printk("%s reg=%x count=%d", __get_str(name), __entry->reg, __entry->count)
);
DEFINE_EVENT(regmap_block, regmap_hw_read_start,
@@ -154,8 +188,7 @@ DECLARE_EVENT_CLASS(regmap_bool,
__entry->flag = flag;
),
- TP_printk("%s flag=%d", __get_str(name),
- (int)__entry->flag)
+ TP_printk("%s flag=%d", __get_str(name), __entry->flag)
);
DEFINE_EVENT(regmap_bool, regmap_cache_only,
@@ -163,7 +196,6 @@ DEFINE_EVENT(regmap_bool, regmap_cache_only,
TP_PROTO(struct regmap *map, bool flag),
TP_ARGS(map, flag)
-
);
DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
@@ -171,7 +203,6 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
TP_PROTO(struct regmap *map, bool flag),
TP_ARGS(map, flag)
-
);
DECLARE_EVENT_CLASS(regmap_async,
@@ -203,7 +234,6 @@ DEFINE_EVENT(regmap_async, regmap_async_io_complete,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
DEFINE_EVENT(regmap_async, regmap_async_complete_start,
@@ -211,7 +241,6 @@ DEFINE_EVENT(regmap_async, regmap_async_complete_start,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
DEFINE_EVENT(regmap_async, regmap_async_complete_done,
@@ -219,7 +248,6 @@ DEFINE_EVENT(regmap_async, regmap_async_complete_done,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
TRACE_EVENT(regcache_drop_region,
@@ -241,8 +269,7 @@ TRACE_EVENT(regcache_drop_region,
__entry->to = to;
),
- TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from,
- (unsigned int)__entry->to)
+ TP_printk("%s %u-%u", __get_str(name), __entry->from, __entry->to)
);
#endif /* _TRACE_REGMAP_H */
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 0af5363a582c..22130b5f789d 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -241,15 +241,13 @@ static int soc_device_match_one(struct device *dev, void *arg)
const struct soc_device_attribute *soc_device_match(
const struct soc_device_attribute *matches)
{
- int ret = 0;
+ int ret;
if (!matches)
return NULL;
- while (!ret) {
- if (!(matches->machine || matches->family ||
- matches->revision || matches->soc_id))
- break;
+ while (matches->machine || matches->family || matches->revision ||
+ matches->soc_id) {
ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
soc_device_match_one);
if (ret < 0 && early_soc_dev_attr)
@@ -257,10 +255,10 @@ const struct soc_device_attribute *soc_device_match(
matches);
if (ret < 0)
return NULL;
- if (!ret)
- matches++;
- else
+ if (ret)
return matches;
+
+ matches++;
}
return NULL;
}
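
For context, the usual calling pattern that the simplified loop above serves; the match-table entries are hypothetical. The array is scanned entry by entry until one matches or the empty sentinel terminates it:

#include <linux/sys_soc.h>

static const struct soc_device_attribute example_quirk_socs[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.*" },	/* hypothetical */
	{ /* sentinel */ }
};

static bool example_needs_quirk(void)
{
	return soc_device_match(example_quirk_socs) != NULL;
}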
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 3bb7beb127a9..4d1976ca5072 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -104,7 +104,7 @@ static int __init test_async_probe_init(void)
struct platform_device **pdev = NULL;
int async_id = 0, sync_id = 0;
unsigned long long duration;
- ktime_t calltime, delta;
+ ktime_t calltime;
int err, nid, cpu;
pr_info("registering first set of asynchronous devices...\n");
@@ -133,8 +133,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_async_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
@@ -161,8 +160,7 @@ static int __init test_async_probe_init(void)
async_id++;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
@@ -197,8 +195,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_sync_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
dev_err(&(*pdev)->dev,
@@ -223,8 +220,7 @@ static int __init test_async_probe_init(void)
sync_id++;
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
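
ktime_ms_delta() folds the ktime_sub()/ktime_to_ms() pair into one call. A minimal sketch of the timing pattern the test now uses:

#include <linux/ktime.h>

static unsigned long long example_time_ms(void (*fn)(void))
{
	ktime_t calltime = ktime_get();

	fn();	/* work being measured */

	return (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
}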
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8f2b641d0b8c..89f98be5c5b9 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -14,11 +14,11 @@
#include <linux/hardirq.h>
#include <linux/topology.h>
-#define define_id_show_func(name) \
+#define define_id_show_func(name, fmt) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \
+ return sysfs_emit(buf, fmt "\n", topology_##name(dev->id)); \
}
#define define_siblings_read_func(name, mask) \
@@ -42,56 +42,67 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
off, count); \
}
-define_id_show_func(physical_package_id);
+define_id_show_func(physical_package_id, "%d");
static DEVICE_ATTR_RO(physical_package_id);
-define_id_show_func(die_id);
+#ifdef TOPOLOGY_DIE_SYSFS
+define_id_show_func(die_id, "%d");
static DEVICE_ATTR_RO(die_id);
+#endif
-define_id_show_func(cluster_id);
+#ifdef TOPOLOGY_CLUSTER_SYSFS
+define_id_show_func(cluster_id, "%d");
static DEVICE_ATTR_RO(cluster_id);
+#endif
-define_id_show_func(core_id);
+define_id_show_func(core_id, "%d");
static DEVICE_ATTR_RO(core_id);
+define_id_show_func(ppin, "0x%llx");
+static DEVICE_ATTR_ADMIN_RO(ppin);
+
define_siblings_read_func(thread_siblings, sibling_cpumask);
-static BIN_ATTR_RO(thread_siblings, 0);
-static BIN_ATTR_RO(thread_siblings_list, 0);
+static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_cpus, sibling_cpumask);
-static BIN_ATTR_RO(core_cpus, 0);
-static BIN_ATTR_RO(core_cpus_list, 0);
+static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_siblings, core_cpumask);
-static BIN_ATTR_RO(core_siblings, 0);
-static BIN_ATTR_RO(core_siblings_list, 0);
+static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
+#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
-static BIN_ATTR_RO(cluster_cpus, 0);
-static BIN_ATTR_RO(cluster_cpus_list, 0);
+static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
-static BIN_ATTR_RO(die_cpus, 0);
-static BIN_ATTR_RO(die_cpus_list, 0);
+static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
+#endif
define_siblings_read_func(package_cpus, core_cpumask);
-static BIN_ATTR_RO(package_cpus, 0);
-static BIN_ATTR_RO(package_cpus_list, 0);
+static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
-#ifdef CONFIG_SCHED_BOOK
-define_id_show_func(book_id);
+#ifdef TOPOLOGY_BOOK_SYSFS
+define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
-static BIN_ATTR_RO(book_siblings, 0);
-static BIN_ATTR_RO(book_siblings_list, 0);
+static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
-#ifdef CONFIG_SCHED_DRAWER
-define_id_show_func(drawer_id);
+#ifdef TOPOLOGY_DRAWER_SYSFS
+define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
-static BIN_ATTR_RO(drawer_siblings, 0);
-static BIN_ATTR_RO(drawer_siblings_list, 0);
+static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
static struct bin_attribute *bin_attrs[] = {
@@ -101,17 +112,21 @@ static struct bin_attribute *bin_attrs[] = {
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&bin_attr_cluster_cpus,
&bin_attr_cluster_cpus_list,
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
+#endif
&bin_attr_package_cpus,
&bin_attr_package_cpus_list,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&bin_attr_book_siblings,
&bin_attr_book_siblings_list,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&bin_attr_drawer_siblings,
&bin_attr_drawer_siblings_list,
#endif
@@ -120,21 +135,36 @@ static struct bin_attribute *bin_attrs[] = {
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
+#ifdef TOPOLOGY_DIE_SYSFS
&dev_attr_die_id.attr,
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&dev_attr_cluster_id.attr,
+#endif
&dev_attr_core_id.attr,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&dev_attr_book_id.attr,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
+ &dev_attr_ppin.attr,
NULL
};
+static umode_t topology_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
.bin_attrs = bin_attrs,
+ .is_visible = topology_is_visible,
.name = "topology"
};