Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/arm_ffa/bus.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig | 32
-rw-r--r--  drivers/firmware/arm_scmi/Makefile | 9
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 398
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 100
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 1225
-rw-r--r--  drivers/firmware/arm_scmi/mailbox.c | 6
-rw-r--r--  drivers/firmware/arm_scmi/optee.c | 6
-rw-r--r--  drivers/firmware/arm_scmi/protocols.h | 7
-rw-r--r--  drivers/firmware/arm_scmi/raw_mode.c | 1443
-rw-r--r--  drivers/firmware/arm_scmi/raw_mode.h | 31
-rw-r--r--  drivers/firmware/arm_scmi/shmem.c | 9
-rw-r--r--  drivers/firmware/arm_scmi/smc.c | 6
-rw-r--r--  drivers/firmware/arm_scmi/virtio.c | 11
-rw-r--r--  drivers/firmware/dmi-sysfs.c | 14
-rw-r--r--  drivers/firmware/efi/cper_cxl.c | 12
-rw-r--r--  drivers/firmware/efi/earlycon.c | 41
-rw-r--r--  drivers/firmware/efi/efi-init.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 85
-rw-r--r--  drivers/firmware/efi/esrt.c | 15
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 4
-rw-r--r--  drivers/firmware/efi/libstub/arm64-entry.S | 67
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 26
-rw-r--r--  drivers/firmware/efi/libstub/arm64.c | 50
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 67
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 23
-rw-r--r--  drivers/firmware/efi/libstub/zboot.c | 2
-rw-r--r--  drivers/firmware/efi/memattr.c | 9
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 1
-rw-r--r--  drivers/firmware/efi/sysfb_efi.c | 8
-rw-r--r--  drivers/firmware/efi/vars.c | 38
-rw-r--r--  drivers/firmware/google/Kconfig | 8
-rw-r--r--  drivers/firmware/google/coreboot_table.c | 9
-rw-r--r--  drivers/firmware/google/coreboot_table.h | 1
-rw-r--r--  drivers/firmware/google/framebuffer-coreboot.c | 4
-rw-r--r--  drivers/firmware/google/gsmi.c | 9
-rw-r--r--  drivers/firmware/meson/meson_sm.c | 2
-rw-r--r--  drivers/firmware/psci/psci.c | 45
-rw-r--r--  drivers/firmware/qcom_scm-legacy.c | 2
-rw-r--r--  drivers/firmware/qcom_scm-smc.c | 88
-rw-r--r--  drivers/firmware/qcom_scm.c | 92
-rw-r--r--  drivers/firmware/qcom_scm.h | 8
-rw-r--r--  drivers/firmware/stratix10-svc.c | 25
-rw-r--r--  drivers/firmware/sysfb_simplefb.c | 43
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 27
45 files changed, 3336 insertions(+), 778 deletions(-)
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 99d439480612..f29d77ecf72d 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -56,9 +56,9 @@ static void ffa_device_remove(struct device *dev)
ffa_drv->remove(to_ffa_dev(dev));
}
-static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ffa_device *ffa_dev = to_ffa_dev(dev);
+ const struct ffa_device *ffa_dev = to_ffa_dev(dev);
return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb",
ffa_dev->vm_id, &ffa_dev->uuid);
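For context, the constification above relies on the ffa_device accessor being
usable on a const pointer. A minimal sketch of such an accessor follows; it
assumes the kernel's container_of_const() helper and that struct ffa_device
embeds its generic device as the member 'dev' (both assumptions, not part of
this hunk):

	/*
	 * Hedged sketch: container_of_const() preserves the const-ness of
	 * the pointer passed in, so one macro serves both the const uevent
	 * path above and the non-const remove path.
	 */
	#define to_ffa_dev(d) container_of_const(d, struct ffa_device, dev)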
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index a14f65444b35..ea0f5083ac47 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -23,6 +23,38 @@ config ARM_SCMI_PROTOCOL
if ARM_SCMI_PROTOCOL
+config ARM_SCMI_NEED_DEBUGFS
+ bool
+ help
+ This declares whether at least one SCMI facility is configured
+ which needs debugfs support. When selected, it causes the creation
+ of a common SCMI debugfs root directory.
+
+config ARM_SCMI_RAW_MODE_SUPPORT
+ bool "Enable support for SCMI Raw transmission mode"
+ depends on DEBUG_FS
+ select ARM_SCMI_NEED_DEBUGFS
+ help
+ Enable support for SCMI Raw transmission mode.
+
+ If enabled, it allows the direct injection and snooping of SCMI bare
+ messages through a dedicated debugfs interface.
+ It is meant to be used by SCMI compliance/testing suites.
+
+ When enabled, regular SCMI driver interactions are inhibited in
+ order to avoid unexpected interactions with the SCMI Raw message
+ flow. If unsure, say N.
+
+config ARM_SCMI_RAW_MODE_SUPPORT_COEX
+ bool "Allow SCMI Raw mode coexistence with normal SCMI stack"
+ depends on ARM_SCMI_RAW_MODE_SUPPORT
+ help
+ Allow SCMI Raw transmission mode to coexist with normal SCMI stack.
+
+ This will allow regular SCMI drivers to register with the core and
+ operate normally, which could make an SCMI test suite that relies on
+ the SCMI Raw mode support unreliable. If unsure, say N.
+
config ARM_SCMI_HAVE_TRANSPORT
bool
help
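To show how these new symbols are meant to be consumed at build time, here is a
hedged C sketch (the helper name is hypothetical; the check itself mirrors the
one added to bus.c further down):

	#include <linux/kconfig.h>

	/*
	 * Sketch: Raw mode is "exclusive" when built without the COEX
	 * option; in that case regular SCMI driver device requests are
	 * refused by the bus code.
	 */
	static inline bool scmi_raw_mode_is_exclusive(void)
	{
		return IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
		       !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
	}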
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ea86f8cc8f7..b31d78fa66cc 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
scmi-bus-y = bus.o
+scmi-core-objs := $(scmi-bus-y)
+
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
@@ -8,9 +11,11 @@ scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
-scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
- $(scmi-transport-y)
+scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
+
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
+
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
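The effect of the split above is that the bus now builds into its own
scmi-core.ko, while the driver, protocols, and transports stay in
scmi-module.ko. A hedged sketch of why load ordering works automatically (the
function is illustrative; scmi_bus_type is the symbol exported from bus.c in
this series):

	#include <linux/device.h>

	extern struct bus_type scmi_bus_type;

	/*
	 * Sketch: any reference from scmi-module.ko to a symbol exported
	 * by scmi-core.ko creates a module dependency, so modprobe loads
	 * scmi-core first; the bus assignment in scmi_driver_register()
	 * is one such reference.
	 */
	static int example_driver_register(struct device_driver *drv)
	{
		drv->bus = &scmi_bus_type;
		return driver_register(drv);
	}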
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 35bb70724d44..73140b854b31 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -7,17 +7,185 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "common.h"
+BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
+EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
+
static DEFINE_IDA(scmi_bus_id);
-static DEFINE_IDR(scmi_protocols);
-static DEFINE_SPINLOCK(protocol_lock);
+
+static DEFINE_IDR(scmi_requested_devices);
+/* Protect access to scmi_requested_devices */
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+ const struct scmi_device_id *id_table;
+ struct list_head node;
+};
+
+/* Track globally the creation of SCMI SystemPower related devices */
+static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
+
+/**
+ * scmi_protocol_device_request - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper lets an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * the freshly requested @id_table is first recorded in the IDR table
+ * @scmi_requested_devices and then the requested device is advertised to any
+ * registered party via the @scmi_requested_devices_nh notification chain.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+{
+ int ret = 0;
+ unsigned int id = 0;
+ struct list_head *head, *phead = NULL;
+ struct scmi_requested_dev *rdev;
+
+ pr_debug("Requesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
+ !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) {
+ pr_warn("SCMI Raw mode active. Rejecting '%s'/0x%02X\n",
+ id_table->name, id_table->protocol_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Search for the matching protocol rdev list and then search it for
+ * any existing device with the same name; fail if a duplicate is found.
+ */
+ mutex_lock(&scmi_requested_devices_mtx);
+ idr_for_each_entry(&scmi_requested_devices, head, id) {
+ if (!phead) {
+ /* A list found registered in the IDR is never empty */
+ rdev = list_first_entry(head, struct scmi_requested_dev,
+ node);
+ if (rdev->id_table->protocol_id ==
+ id_table->protocol_id)
+ phead = head;
+ }
+ list_for_each_entry(rdev, head, node) {
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
+ pr_err("Ignoring duplicate request [%d] %s\n",
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * No duplicate found for requested id_table, so let's create a new
+ * requested device entry for this new valid request.
+ */
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rdev->id_table = id_table;
+
+ /*
+ * Append the new requested device table descriptor to the head of the
+ * related protocol list, creating the list head first if it is not
+ * already there.
+ */
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead) {
+ kfree(rdev);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(phead);
+
+ ret = idr_alloc(&scmi_requested_devices, (void *)phead,
+ id_table->protocol_id,
+ id_table->protocol_id + 1, GFP_KERNEL);
+ if (ret != id_table->protocol_id) {
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
+ kfree(rdev);
+ kfree(phead);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ }
+ list_add(&rdev->node, phead);
+
+out:
+ mutex_unlock(&scmi_requested_devices_mtx);
+
+ if (!ret)
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_REQUEST,
+ (void *)rdev->id_table);
+
+ return ret;
+}
+
+/**
+ * scmi_protocol_device_unrequest - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * The unrequested device, described by the provided id_table, is first
+ * removed from the IDR @scmi_requested_devices and then the removal is
+ * advertised to any registered party via the @scmi_requested_devices_nh
+ * notification chain.
+ */
+static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+ struct list_head *phead;
+
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ struct scmi_requested_dev *victim, *tmp;
+
+ list_for_each_entry_safe(victim, tmp, phead, node) {
+ if (!strcmp(victim->id_table->name, id_table->name)) {
+ list_del(&victim->node);
+
+ mutex_unlock(&scmi_requested_devices_mtx);
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_UNREQUEST,
+ (void *)victim->id_table);
+ kfree(victim);
+ mutex_lock(&scmi_requested_devices_mtx);
+ break;
+ }
+ }
+
+ if (list_empty(phead)) {
+ idr_remove(&scmi_requested_devices,
+ id_table->protocol_id);
+ kfree(phead);
+ }
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
+}
static const struct scmi_device_id *
scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
@@ -57,11 +225,11 @@ static int scmi_match_by_id_table(struct device *dev, void *data)
struct scmi_device_id *id_table = data;
return sdev->protocol_id == id_table->protocol_id &&
- !strcmp(sdev->name, id_table->name);
+ (id_table->name && !strcmp(sdev->name, id_table->name));
}
-struct scmi_device *scmi_child_dev_find(struct device *parent,
- int prot_id, const char *name)
+static struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name)
{
struct scmi_device_id id_table;
struct device *dev;
@@ -76,30 +244,6 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
return to_scmi_dev(dev);
}
-const struct scmi_protocol *scmi_protocol_get(int protocol_id)
-{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
- if (!proto || !try_module_get(proto->owner)) {
- pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
- return NULL;
- }
-
- pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
-
- return proto;
-}
-
-void scmi_protocol_put(int protocol_id)
-{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
- if (proto)
- module_put(proto->owner);
-}
-
static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -120,12 +264,13 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
-static struct bus_type scmi_bus_type = {
+struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
};
+EXPORT_SYMBOL_GPL(scmi_bus_type);
int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
const char *mod_name)
@@ -146,7 +291,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
retval = driver_register(&driver->driver);
if (!retval)
- pr_debug("registered new scmi driver %s\n", driver->name);
+ pr_debug("Registered new scmi driver %s\n", driver->name);
return retval;
}
@@ -164,13 +309,53 @@ static void scmi_device_release(struct device *dev)
kfree(to_scmi_dev(dev));
}
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
- const char *name)
+static void __scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+ pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(scmi_dev->dev.parent->of_node),
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+
+ if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
+ atomic_set(&scmi_syspower_registered, 0);
+
+ kfree_const(scmi_dev->name);
+ ida_free(&scmi_bus_id, scmi_dev->id);
+ device_unregister(&scmi_dev->dev);
+}
+
+static struct scmi_device *
+__scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
{
int id, retval;
struct scmi_device *scmi_dev;
+ /*
+ * If a device with the same protocol/name already exists under the same
+ * parent (i.e. SCMI instance), just return the existing device.
+ * This avoids any race between the SCMI driver, creating devices for
+ * each DT defined protocol at probe time, and the concurrent
+ * registration of SCMI drivers.
+ */
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ return scmi_dev;
+
+ /*
+ * Ignore any possible subsequent failures while creating the device
+ * since we are doomed anyway at that point; not using a mutex which
+ * spans this whole function to keep things simple and to avoid
+ * serializing all the __scmi_device_create calls across possibly
+ * different SCMI server instances (parent)
+ */
+ if (protocol == SCMI_PROTOCOL_SYSTEM &&
+ atomic_cmpxchg(&scmi_syspower_registered, 0, 1)) {
+ dev_warn(parent,
+ "SCMI SystemPower protocol device must be unique !\n");
+ return NULL;
+ }
+
scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
if (!scmi_dev)
return NULL;
@@ -191,7 +376,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
scmi_dev->id = id;
scmi_dev->protocol_id = protocol;
scmi_dev->dev.parent = parent;
- scmi_dev->dev.of_node = np;
+ device_set_node(&scmi_dev->dev, of_fwnode_handle(np));
scmi_dev->dev.bus = &scmi_bus_type;
scmi_dev->dev.release = scmi_device_release;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
@@ -200,6 +385,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
if (retval)
goto put_dev;
+ pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ dev_name(&scmi_dev->dev), protocol, name);
+
return scmi_dev;
put_dev:
kfree_const(scmi_dev->name);
@@ -208,77 +397,85 @@ put_dev:
return NULL;
}
-void scmi_device_destroy(struct scmi_device *scmi_dev)
-{
- kfree_const(scmi_dev->name);
- scmi_handle_put(scmi_dev->handle);
- ida_free(&scmi_bus_id, scmi_dev->id);
- device_unregister(&scmi_dev->dev);
-}
-
-void scmi_device_link_add(struct device *consumer, struct device *supplier)
-{
- struct device_link *link;
-
- link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
-
- WARN_ON(!link);
-}
-
-void scmi_set_handle(struct scmi_device *scmi_dev)
-{
- scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
- if (scmi_dev->handle)
- scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
-}
-
-int scmi_protocol_register(const struct scmi_protocol *proto)
+/**
+ * scmi_device_create - A method to create one or more SCMI devices
+ *
+ * @np: A reference to the device node to use for the new device(s)
+ * @parent: The parent device to use identifying a specific SCMI instance
+ * @protocol: The SCMI protocol to be associated with this device
+ * @name: The requested name of the device to be created; this is optional
+ * and if no @name is provided, all the devices currently known to
+ * be requested on the SCMI bus for @protocol will be created.
+ *
+ * This method can be invoked to create a single well-defined device (like
+ * a transport device or a device requested by an SCMI driver loaded after
+ * the core SCMI stack has been probed), or to create all the devices currently
+ * known to have been requested by the loaded SCMI drivers for a specific
+ * protocol (typically during SCMI core protocol enumeration at probe time).
+ *
+ * Return: The created device (or one of them if @name was NOT provided and
+ * multiple devices were created) or NULL if no device was created;
+ * note that NULL indicates an error ONLY when a specific @name
+ * was provided: when no @name was provided, a number of devices
+ * may potentially have been created for the whole protocol, unless no
+ * device was found to have been requested for that protocol.
+ */
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name)
{
- int ret;
-
- if (!proto) {
- pr_err("invalid protocol\n");
- return -EINVAL;
- }
-
- if (!proto->instance_init) {
- pr_err("missing init for protocol 0x%x\n", proto->id);
- return -EINVAL;
+ struct list_head *phead;
+ struct scmi_requested_dev *rdev;
+ struct scmi_device *scmi_dev = NULL;
+
+ if (name)
+ return __scmi_device_create(np, parent, protocol, name);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, protocol);
+ /* Nothing to do. */
+ if (!phead) {
+ mutex_unlock(&scmi_requested_devices_mtx);
+ return scmi_dev;
}
- spin_lock(&protocol_lock);
- ret = idr_alloc(&scmi_protocols, (void *)proto,
- proto->id, proto->id + 1, GFP_ATOMIC);
- spin_unlock(&protocol_lock);
- if (ret != proto->id) {
- pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
- proto->id, ret);
- return ret;
+ /* Walk the list of requested devices for protocol and create them */
+ list_for_each_entry(rdev, phead, node) {
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ /* Report errors and carry on... */
+ if (sdev)
+ scmi_dev = sdev;
+ else
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
}
+ mutex_unlock(&scmi_requested_devices_mtx);
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
-
- return 0;
+ return scmi_dev;
}
-EXPORT_SYMBOL_GPL(scmi_protocol_register);
+EXPORT_SYMBOL_GPL(scmi_device_create);
-void scmi_protocol_unregister(const struct scmi_protocol *proto)
+void scmi_device_destroy(struct device *parent, int protocol, const char *name)
{
- spin_lock(&protocol_lock);
- idr_remove(&scmi_protocols, proto->id);
- spin_unlock(&protocol_lock);
-
- pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+ struct scmi_device *scmi_dev;
- return;
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ __scmi_device_destroy(scmi_dev);
}
-EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+EXPORT_SYMBOL_GPL(scmi_device_destroy);
static int __scmi_devices_unregister(struct device *dev, void *data)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- scmi_device_destroy(scmi_dev);
+ __scmi_device_destroy(scmi_dev);
return 0;
}
@@ -287,20 +484,33 @@ static void scmi_devices_unregister(void)
bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
}
-int __init scmi_bus_init(void)
+static int __init scmi_bus_init(void)
{
int retval;
retval = bus_register(&scmi_bus_type);
if (retval)
- pr_err("scmi protocol bus register failed (%d)\n", retval);
+ pr_err("SCMI protocol bus register failed (%d)\n", retval);
+ else
+ pr_info("SCMI protocol bus registered\n");
return retval;
}
+subsys_initcall(scmi_bus_init);
-void __exit scmi_bus_exit(void)
+static void __exit scmi_bus_exit(void)
{
+ /*
+ * Destroy all remaining devices: just in case the drivers were
+ * manually unbound at first and then the modules unloaded.
+ */
scmi_devices_unregister();
bus_unregister(&scmi_bus_type);
ida_destroy(&scmi_bus_id);
}
+module_exit(scmi_bus_exit);
+
+MODULE_ALIAS("scmi-core");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol bus");
+MODULE_LICENSE("GPL");
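For clarity, the request/unrequest flow implemented above is consumed through
the scmi_requested_devices_nh blocking notifier chain. A hedged sketch of a
listener follows; the callback and notifier names are hypothetical, while the
actions and the payload type come from common.h in this same series:

	#include <linux/notifier.h>

	static int scmi_dev_req_listener(struct notifier_block *nb,
					 unsigned long action, void *data)
	{
		const struct scmi_device_id *id_table = data;

		switch (action) {
		case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
			/* Create a device for id_table on this instance */
			break;
		case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
			/* Destroy the matching device, if any */
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	static struct notifier_block scmi_dev_req_nb = {
		.notifier_call = scmi_dev_req_listener,
	};

	/* blocking_notifier_chain_register(&scmi_requested_devices_nh,
	 *				    &scmi_dev_req_nb); */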
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index a1c0154c31c6..c46dc5215af7 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -27,6 +27,48 @@
#include "protocols.h"
#include "notify.h"
+#define SCMI_MAX_CHANNELS 256
+
+#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+enum scmi_error_codes {
+ SCMI_SUCCESS = 0, /* Success */
+ SCMI_ERR_SUPPORT = -1, /* Not supported */
+ SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
+ SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
+ SCMI_ERR_ENTRY = -4, /* Not found */
+ SCMI_ERR_RANGE = -5, /* Value out of range */
+ SCMI_ERR_BUSY = -6, /* Device busy */
+ SCMI_ERR_COMMS = -7, /* Communication Error */
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+};
+
+static const int scmi_linux_errmap[] = {
+ /* better than a switch statement as long as the return values are contiguous */
+ 0, /* SCMI_SUCCESS */
+ -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
+ -EINVAL, /* SCMI_ERR_PARAMS */
+ -EACCES, /* SCMI_ERR_ACCESS */
+ -ENOENT, /* SCMI_ERR_ENTRY */
+ -ERANGE, /* SCMI_ERR_RANGE */
+ -EBUSY, /* SCMI_ERR_BUSY */
+ -ECOMM, /* SCMI_ERR_COMMS */
+ -EIO, /* SCMI_ERR_GENERIC */
+ -EREMOTEIO, /* SCMI_ERR_HARDWARE */
+ -EPROTO, /* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+ int err_idx = -errno;
+
+ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+ return scmi_linux_errmap[err_idx];
+ return -EIO;
+}
+
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
@@ -96,18 +138,19 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
-int scmi_handle_put(const struct scmi_handle *handle);
-void scmi_device_link_add(struct device *consumer, struct device *supplier);
-struct scmi_handle *scmi_handle_get(struct device *dev);
-void scmi_set_handle(struct scmi_device *scmi_dev);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-int __init scmi_bus_init(void);
-void __exit scmi_bus_exit(void);
+extern struct bus_type scmi_bus_type;
+
+#define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0
+#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1
+extern struct blocking_notifier_head scmi_requested_devices_nh;
-const struct scmi_protocol *scmi_protocol_get(int protocol_id);
-void scmi_protocol_put(int protocol_id);
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name);
+void scmi_device_destroy(struct device *parent, int protocol, const char *name);
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
@@ -116,6 +159,8 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
*
+ * @id: An identifier for this channel: this matches the protocol number
+ * used to initialize this channel
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @rx_timeout_ms: The configured RX timeout in milliseconds.
@@ -127,6 +172,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
+ int id;
struct device *dev;
unsigned int rx_timeout_ms;
struct scmi_handle *handle;
@@ -153,7 +199,7 @@ struct scmi_chan_info {
*/
struct scmi_transport_ops {
int (*link_supplier)(struct device *dev);
- bool (*chan_available)(struct device *dev, int idx);
+ bool (*chan_available)(struct device_node *of_node, int idx);
int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
bool tx);
int (*chan_free)(int id, void *p, void *data);
@@ -170,11 +216,6 @@ struct scmi_transport_ops {
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
-int scmi_protocol_device_request(const struct scmi_device_id *id_table);
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
-struct scmi_device *scmi_child_dev_find(struct device *parent,
- int prot_id, const char *name);
-
/**
* struct scmi_desc - Description of SoC integration
*
@@ -215,6 +256,36 @@ struct scmi_desc {
const bool atomic_enabled;
};
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return cinfo->no_completion_irq || desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
+{
+ return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return is_polling_required(cinfo, desc) &&
+ is_transport_polling_capable(desc);
+}
+
+void scmi_xfer_raw_put(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);
+
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms);
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
extern const struct scmi_desc scmi_mailbox_desc;
#endif
@@ -229,7 +300,6 @@ extern const struct scmi_desc scmi_optee_desc;
#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
/* shmem related declarations */
struct scmi_shared_mem;
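With the error map now shared via common.h, both the regular driver path and
the new Raw mode code can translate firmware status words the same way. A short
hedged usage sketch (the helper name is hypothetical):

	/*
	 * Sketch: a platform status of SCMI_ERR_SUPPORT (-1) indexes
	 * scmi_linux_errmap[1] and becomes -EOPNOTSUPP; anything outside
	 * the known range collapses to -EIO.
	 */
	static int scmi_check_status(const struct scmi_xfer *xfer)
	{
		if (xfer->hdr.status != SCMI_SUCCESS)
			return scmi_to_linux_errno(xfer->hdr.status);
		return 0;
	}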
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index f818d00bb2c6..d21c7eafd641 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -14,7 +14,10 @@
* Copyright (C) 2018-2021 ARM Ltd.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bitmap.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
@@ -34,22 +37,15 @@
#include "common.h"
#include "notify.h"
+#include "raw_mode.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
-enum scmi_error_codes {
- SCMI_SUCCESS = 0, /* Success */
- SCMI_ERR_SUPPORT = -1, /* Not supported */
- SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
- SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
- SCMI_ERR_ENTRY = -4, /* Not found */
- SCMI_ERR_RANGE = -5, /* Value out of range */
- SCMI_ERR_BUSY = -6, /* Device busy */
- SCMI_ERR_COMMS = -7, /* Communication Error */
- SCMI_ERR_GENERIC = -8, /* Generic Error */
- SCMI_ERR_HARDWARE = -9, /* Hardware Error */
- SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-};
+static DEFINE_IDA(scmi_id);
+
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
@@ -58,18 +54,7 @@ static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
-static DEFINE_IDR(scmi_requested_devices);
-static DEFINE_MUTEX(scmi_requested_devices_mtx);
-
-/* Track globally the creation of SCMI SystemPower related devices */
-static bool scmi_syspower_registered;
-/* Protect access to scmi_syspower_registered */
-static DEFINE_MUTEX(scmi_syspower_mtx);
-
-struct scmi_requested_dev {
- const struct scmi_device_id *id_table;
- struct list_head node;
-};
+static struct dentry *scmi_top_dentry;
/**
* struct scmi_xfers_info - Structure to manage transfer information
@@ -118,8 +103,23 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+};
+
+/**
* struct scmi_info - Structure representing a SCMI instance
*
+ * @id: A sequence number starting from zero identifying this instance
* @dev: Device pointer
* @desc: SoC description for this instance
* @version: SCMI revision information containing protocol version,
@@ -147,8 +147,15 @@ struct scmi_protocol_instance {
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
+ * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
+ * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
+ * bus
+ * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
+ * @dbg: A pointer to debugfs related data (if any)
+ * @raw: An opaque reference handle used by SCMI Raw mode.
*/
struct scmi_info {
+ int id;
struct device *dev;
const struct scmi_desc *desc;
struct scmi_revision_info version;
@@ -166,32 +173,114 @@ struct scmi_info {
void *notify_priv;
struct list_head node;
int users;
+ struct notifier_block bus_nb;
+ struct notifier_block dev_req_nb;
+ /* Serialize device creation process for this instance */
+ struct mutex devreq_mtx;
+ struct scmi_debug_info *dbg;
+ void *raw;
};
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
+#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
-static const int scmi_linux_errmap[] = {
- /* better than switch case as long as return value is continuous */
- 0, /* SCMI_SUCCESS */
- -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
- -EINVAL, /* SCMI_ERR_PARAM */
- -EACCES, /* SCMI_ERR_ACCESS */
- -ENOENT, /* SCMI_ERR_ENTRY */
- -ERANGE, /* SCMI_ERR_RANGE */
- -EBUSY, /* SCMI_ERR_BUSY */
- -ECOMM, /* SCMI_ERR_COMMS */
- -EIO, /* SCMI_ERR_GENERIC */
- -EREMOTEIO, /* SCMI_ERR_HARDWARE */
- -EPROTO, /* SCMI_ERR_PROTOCOL */
-};
+static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto || !try_module_get(proto->owner)) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+ return proto;
+}
+
+static void scmi_protocol_put(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (proto)
+ module_put(proto->owner);
+}
+
+int scmi_protocol_register(const struct scmi_protocol *proto)
+{
+ int ret;
+
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ spin_lock(&protocol_lock);
+ ret = idr_alloc(&scmi_protocols, (void *)proto,
+ proto->id, proto->id + 1, GFP_ATOMIC);
+ spin_unlock(&protocol_lock);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
+
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
+{
+ spin_lock(&protocol_lock);
+ idr_remove(&scmi_protocols, proto->id);
+ spin_unlock(&protocol_lock);
-static inline int scmi_to_linux_errno(int errno)
+ pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+/**
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ * @name: The optional name of the device to be created: if not provided this
+ * call will lead to the creation of all the devices currently requested
+ * for the specified protocol.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info,
+ int prot_id, const char *name)
{
- int err_idx = -errno;
+ struct scmi_device *sdev;
- if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
- return scmi_linux_errmap[err_idx];
- return -EIO;
+ mutex_lock(&info->devreq_mtx);
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
+ if (name && !sdev)
+ dev_err(info->dev,
+ "failed to create device for protocol 0x%X (%s)\n",
+ prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
+}
+
+static void scmi_destroy_protocol_devices(struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ mutex_lock(&info->devreq_mtx);
+ scmi_device_destroy(info->dev, prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
}
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
@@ -311,8 +400,6 @@ static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
if (xfer_id != next_token)
atomic_add((int)(xfer_id - next_token), &transfer_last_id);
- /* Set in-flight */
- set_bit(xfer_id, minfo->xfer_alloc_table);
xfer->hdr.seq = (u16)xfer_id;
return 0;
@@ -331,32 +418,123 @@ static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
}
/**
+ * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper assumes that the xfer to be registered as in-flight
+ * had been built using an xfer sequence number which still corresponds to a
+ * free slot in the xfer_alloc_table.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ */
+static inline void
+scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ /* Set in-flight */
+ set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+ hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
+ xfer->pending = true;
+}
+
+/**
+ * scmi_xfer_inflight_register - Try to register an xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper does NOT assume anything about the sequence number
+ * that was baked into the provided xfer, so it first checks if it can
+ * be mapped to a free slot and fails with an error if another xfer with the
+ * same sequence number is currently still registered as in-flight.
+ *
+ * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
+ * could not be mapped to a free slot in the xfer_alloc_table.
+ */
+static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
+ * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
+ * flight on the TX channel, if possible.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: The xfer to register
+ *
+ * Return: 0 on Success, error otherwise
+ */
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
+}
+
+/**
+ * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
+ * as pending in-flight
+ *
+ * @xfer: The xfer to act upon
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Return: 0 on Success or error otherwise
+ */
+static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ /* Set a new monotonic token as the xfer sequence number */
+ ret = scmi_xfer_token_set(minfo, xfer);
+ if (!ret)
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
- * @set_pending: If true a monotonic token is picked and the xfer is added to
- * the pending hash table.
*
* Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
- * Picks an xfer from the free list @free_xfers (if any available) and, if
- * required, sets a monotonically increasing token and stores the inflight xfer
- * into the @pending_xfers hashtable for later retrieval.
+ * Picks an xfer from the free list @free_xfers (if any available) and performs
+ * a basic initialization.
+ *
+ * Note that, at this point, no sequence number has been assigned yet to the
+ * allocated xfer, nor has it been registered as a pending transaction.
*
* The successfully initialized xfer is refcounted.
*
- * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
- * @free_xfers.
+ * Context: Holds @xfer_lock while manipulating @free_xfers.
*
- * Return: 0 if all went fine, else corresponding error.
+ * Return: An initialized xfer if all went fine, else an error pointer.
*/
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
- struct scmi_xfers_info *minfo,
- bool set_pending)
+ struct scmi_xfers_info *minfo)
{
- int ret;
unsigned long flags;
struct scmi_xfer *xfer;
@@ -376,31 +554,71 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
*/
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
- if (set_pending) {
- /* Pick and set monotonic token */
- ret = scmi_xfer_token_set(minfo, xfer);
- if (!ret) {
- hash_add(minfo->pending_xfers, &xfer->node,
- xfer->hdr.seq);
- xfer->pending = true;
- } else {
- dev_err(handle->dev,
- "Failed to get monotonic token %d\n", ret);
- hlist_add_head(&xfer->node, &minfo->free_xfers);
- xfer = ERR_PTR(ret);
- }
- }
-
- if (!IS_ERR(xfer)) {
- refcount_set(&xfer->users, 1);
- atomic_set(&xfer->busy, SCMI_XFER_FREE);
- }
+ refcount_set(&xfer->users, 1);
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return xfer;
}
/**
+ * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
+ *
+ * @handle: Pointer to SCMI entity handle
+ *
+ * Note that xfer is taken from the TX channel structures.
+ *
+ * Return: A valid xfer on Success, or an error-pointer otherwise
+ */
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
+{
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer = scmi_xfer_get(handle, &info->tx_minfo);
+ if (!IS_ERR(xfer))
+ xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
+
+ return xfer;
+}
+
+/**
+ * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
+ * to use for a specific protocol_id Raw transaction.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol_id: Identifier of the protocol
+ *
+ * Note that in a regular SCMI stack, usually, a protocol has to be defined in
+ * the DT to have an associated channel and be usable; but in Raw mode any
+ * protocol in range is allowed, re-using the Base channel, so as to enable
+ * fuzzing on any protocol without the need for a fully compiled DT.
+ *
+ * Return: A reference to the channel to use, or an ERR_PTR
+ */
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_chan_info *cinfo;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ cinfo = idr_find(&info->tx_idr, protocol_id);
+ if (!cinfo) {
+ if (protocol_id == SCMI_PROTOCOL_BASE)
+ return ERR_PTR(-EINVAL);
+ /* Use Base channel for protocols not defined in the DT */
+ cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+ if (!cinfo)
+ return ERR_PTR(-EINVAL);
+ dev_warn_once(handle->dev,
+ "Using Base channel for protocol 0x%X\n",
+ protocol_id);
+ }
+
+ return cinfo;
+}
+
+/**
* __scmi_xfer_put() - Release a message
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -429,6 +647,24 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
}
/**
+ * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: A reference to the xfer to put
+ *
+ * Note that, as with other xfer_put() handlers, the xfer is effectively
+ * released only if there are no more users on the system.
+ */
+void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
+ xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
+ return __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+/**
* scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -623,25 +859,6 @@ static inline void scmi_clear_channel(struct scmi_info *info,
info->desc->ops->clear_channel(cinfo);
}
-static inline bool is_polling_required(struct scmi_chan_info *cinfo,
- struct scmi_info *info)
-{
- return cinfo->no_completion_irq || info->desc->force_polling;
-}
-
-static inline bool is_transport_polling_capable(struct scmi_info *info)
-{
- return info->desc->ops->poll_done ||
- info->desc->sync_cmds_completed_on_ret;
-}
-
-static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
- struct scmi_info *info)
-{
- return is_polling_required(cinfo, info) &&
- is_transport_polling_capable(info);
-}
-
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
@@ -652,7 +869,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
ktime_t ts;
ts = ktime_get_boottime();
- xfer = scmi_xfer_get(cinfo->handle, minfo, false);
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
PTR_ERR(xfer));
@@ -667,9 +884,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
- xfer->hdr.seq, xfer->hdr.status,
- xfer->rx.buf, xfer->rx.len);
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "NOTI", xfer->hdr.seq,
+ xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -678,6 +895,12 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
xfer->hdr.protocol_id, xfer->hdr.seq,
MSG_TYPE_NOTIFICATION);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+ scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
+ cinfo->id);
+ }
+
__scmi_xfer_put(minfo, xfer);
scmi_clear_channel(info, cinfo);
@@ -691,6 +914,9 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
+
if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
scmi_clear_channel(info, cinfo);
return;
@@ -705,9 +931,11 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id,
xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
- "DLYD" : "RESP",
+ (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
+ (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
@@ -722,6 +950,18 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
complete(&xfer->done);
}
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ /*
+ * When in polling mode, avoid queueing the Raw xfer on the IRQ
+ * RX path since it will already have been queued at the end of
+ * the TX poll loop.
+ */
+ if (!xfer->hdr.poll_completion)
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
+
scmi_xfer_command_release(info, xfer);
}
@@ -785,36 +1025,18 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
ktime_after(ktime_get(), stop);
}
-/**
- * scmi_wait_for_message_response - An helper to group all the possible ways of
- * waiting for a synchronous message response.
- *
- * @cinfo: SCMI channel info
- * @xfer: Reference to the transfer being waited for.
- *
- * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
- * configuration flags like xfer->hdr.poll_completion.
- *
- * Return: 0 on Success, error otherwise.
- */
-static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
- struct scmi_xfer *xfer)
+static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, unsigned int timeout_ms)
{
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct device *dev = info->dev;
- int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
-
- trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq,
- timeout_ms,
- xfer->hdr.poll_completion);
+ int ret = 0;
if (xfer->hdr.poll_completion) {
/*
* Real polling is needed only if transport has NOT declared
* itself to support synchronous commands replies.
*/
- if (!info->desc->sync_cmds_completed_on_ret) {
+ if (!desc->sync_cmds_completed_on_ret) {
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
@@ -833,6 +1055,8 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
if (!ret) {
unsigned long flags;
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
/*
* Do not fetch_response if an out-of-order delayed
@@ -840,16 +1064,27 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
*/
spin_lock_irqsave(&xfer->lock, flags);
if (xfer->state == SCMI_XFER_SENT_OK) {
- info->desc->ops->fetch_response(cinfo, xfer);
+ desc->ops->fetch_response(cinfo, xfer);
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
/* Trace polled replies. */
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
- "RESP",
+ trace_scmi_msg_dump(info->id, cinfo->id,
+ xfer->hdr.protocol_id, xfer->hdr.id,
+ !SCMI_XFER_IS_RAW(xfer) ?
+ "RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
+
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
}
} else {
/* And we wait for the response. */
@@ -865,6 +1100,59 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
}
/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ info->desc->max_rx_timeout_ms,
+ xfer->hdr.poll_completion);
+
+ return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
+ info->desc->max_rx_timeout_ms);
+}
+
+/**
+ * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
+ * reply to an xfer raw request on a specific channel for the required timeout.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ * @timeout_ms: The maximum timeout in milliseconds
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms)
+{
+ int ret;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
+ if (ret)
+ dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+
+ return ret;
+}
+
+/**
* do_xfer() - Do one transfer
*
* @ph: Pointer to SCMI protocol handle
@@ -884,7 +1172,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_chan_info *cinfo;
/* Check for polling request on custom command xfers at first */
- if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
+ if (xfer->hdr.poll_completion &&
+ !is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
return -EINVAL;
@@ -895,7 +1184,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return -EINVAL;
/* True ONLY if also supported by transport. */
- if (is_polling_enabled(cinfo, info))
+ if (is_polling_enabled(cinfo, info->desc))
xfer->hdr.poll_completion = true;
/*
@@ -910,6 +1199,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
+ /* Clear any stale status */
+ xfer->hdr.status = SCMI_SUCCESS;
xfer->state = SCMI_XFER_SENT_OK;
/*
* Even though spinlocking is not needed here since no race is possible
@@ -926,9 +1217,9 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
- xfer->hdr.seq, xfer->hdr.status,
- xfer->tx.buf, xfer->tx.len);
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "CMND", xfer->hdr.seq,
+ xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
@@ -952,8 +1243,6 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
xfer->rx.len = info->desc->max_msg_size;
}
-#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
-
/**
* do_xfer_with_response() - Do one transfer and wait until the delayed
* response is received
@@ -1041,13 +1330,22 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(pi->handle, minfo, true);
+ xfer = scmi_xfer_get(pi->handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
return ret;
}
+ /* Pick a sequence number and register this xfer as in-flight */
+ ret = scmi_xfer_pending_set(xfer, minfo);
+ if (ret) {
+ dev_err(pi->handle->dev,
+ "Failed to get monotonic token %d\n", ret);
+ __scmi_xfer_put(minfo, xfer);
+ return ret;
+ }
+
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.type = MSG_TYPE_COMMAND;
@@ -1820,20 +2118,14 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
bool ret;
struct scmi_info *info = handle_to_scmi_info(handle);
- ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+ ret = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
if (ret && atomic_threshold)
*atomic_threshold = info->atomic_threshold;
return ret;
}
-static inline
-struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
-{
- info->users++;
- return &info->handle;
-}
-
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
@@ -1845,7 +2137,7 @@ struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
*
* Return: pointer to handle if successful, NULL on error
*/
-struct scmi_handle *scmi_handle_get(struct device *dev)
+static struct scmi_handle *scmi_handle_get(struct device *dev)
{
struct list_head *p;
struct scmi_info *info;
@@ -1855,7 +2147,8 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
list_for_each(p, &scmi_list) {
info = list_entry(p, struct scmi_info, node);
if (dev->parent == info->dev) {
- handle = scmi_handle_get_from_info_unlocked(info);
+ info->users++;
+ handle = &info->handle;
break;
}
}
@@ -1876,7 +2169,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
* Return: 0 is successfully released
* if null was passed, it returns -EINVAL;
*/
-int scmi_handle_put(const struct scmi_handle *handle)
+static int scmi_handle_put(const struct scmi_handle *handle)
{
struct scmi_info *info;
@@ -1892,6 +2185,23 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
+static void scmi_device_link_add(struct device *consumer,
+ struct device *supplier)
+{
+ struct device_link *link;
+
+ link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ WARN_ON(!link);
+}
+
+static void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+ scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+ if (scmi_dev->handle)
+ scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
+}
+
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
struct scmi_xfers_info *info)
{
@@ -1985,23 +2295,20 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return ret;
}
-static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
int prot_id, bool tx)
{
int ret, idx;
+ char name[32];
struct scmi_chan_info *cinfo;
struct idr *idr;
+ struct scmi_device *tdev = NULL;
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
- /* check if already allocated, used for multiple device per protocol */
- cinfo = idr_find(idr, prot_id);
- if (cinfo)
- return 0;
-
- if (!info->desc->ops->chan_available(dev, idx)) {
+ if (!info->desc->ops->chan_available(of_node, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
@@ -2012,27 +2319,52 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
if (!cinfo)
return -ENOMEM;
- cinfo->dev = dev;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+ /* Create a unique name for this transport device */
+ snprintf(name, 32, "__scmi_transport_device_%s_%02X",
+ idx ? "rx" : "tx", prot_id);
+ /* Create a uniquely named, dedicated transport device for this chan */
+ tdev = scmi_device_create(of_node, info->dev, prot_id, name);
+ if (!tdev) {
+ dev_err(info->dev,
+ "failed to create transport device (%s)\n", name);
+ devm_kfree(info->dev, cinfo);
+ return -EINVAL;
+ }
+ of_node_get(of_node);
+
+ cinfo->id = prot_id;
+ cinfo->dev = &tdev->dev;
ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
- if (ret)
+ if (ret) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
return ret;
+ }
- if (tx && is_polling_required(cinfo, info)) {
- if (is_transport_polling_capable(info))
- dev_info(dev,
+ if (tx && is_polling_required(cinfo, info->desc)) {
+ if (is_transport_polling_capable(info->desc))
+ dev_info(&tdev->dev,
"Enabled polling mode TX channel - prot_id:%d\n",
prot_id);
else
- dev_warn(dev,
+ dev_warn(&tdev->dev,
"Polling mode NOT supported by transport.\n");
}
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
- dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+ dev_err(info->dev,
+ "unable to allocate SCMI idr slot err %d\n", ret);
+ /* Destroy channel and device only if created by this call. */
+ if (tdev) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
+ }
return ret;
}
@@ -2041,13 +2373,14 @@ idr_alloc:
}
static inline int
-scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
+ int prot_id)
{
- int ret = scmi_chan_setup(info, dev, prot_id, true);
+ int ret = scmi_chan_setup(info, of_node, prot_id, true);
if (!ret) {
/* Rx is optional, report only memory errors */
- ret = scmi_chan_setup(info, dev, prot_id, false);
+ ret = scmi_chan_setup(info, of_node, prot_id, false);
if (ret && ret != -ENOMEM)
ret = 0;
}
@@ -2056,306 +2389,264 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
}
/**
- * scmi_get_protocol_device - Helper to get/create an SCMI device.
- *
- * @np: A device node representing a valid active protocols for the referred
- * SCMI instance.
- * @info: The referred SCMI instance for which we are getting/creating this
- * device.
- * @prot_id: The protocol ID.
- * @name: The device name.
- *
- * Referring to the specific SCMI instance identified by @info, this helper
- * takes care to return a properly initialized device matching the requested
- * @proto_id and @name: if device was still not existent it is created as a
- * child of the specified SCMI instance @info and its transport properly
- * initialized as usual.
- *
- * Return: A properly initialized scmi device, NULL otherwise.
+ * scmi_channels_setup - Helper to initialize all required channels
+ *
+ * @info: The SCMI instance descriptor.
+ *
+ * Initialize all the channels described in the DT against the underlying
+ * configured transport, using custom defined dedicated devices instead of
+ * borrowing devices from the SCMI drivers; this way channels are initialized
+ * upfront during core SCMI stack probing and are no longer coupled with the
+ * SCMI devices used by SCMI drivers.
+ *
+ * Note that, even though a pair of TX/RX channels is associated with each
+ * protocol defined in the DT, a distinct freshly initialized channel is
+ * created only if the DT node for the protocol at hand describes a dedicated
+ * channel: in all the other cases the common BASE protocol channel is reused.
+ *
+ * Return: 0 on Success
*/
-static inline struct scmi_device *
-scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static int scmi_channels_setup(struct scmi_info *info)
{
- struct scmi_device *sdev;
+ int ret;
+ struct device_node *child, *top_np = info->dev->of_node;
- /* Already created for this parent SCMI instance ? */
- sdev = scmi_child_dev_find(info->dev, prot_id, name);
- if (sdev)
- return sdev;
+ /* Initialize a common generic channel at first */
+ ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
+ if (ret)
+ return ret;
- mutex_lock(&scmi_syspower_mtx);
- if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
- dev_warn(info->dev,
- "SCMI SystemPower protocol device must be unique !\n");
- mutex_unlock(&scmi_syspower_mtx);
+ for_each_available_child_of_node(top_np, child) {
+ u32 prot_id;
- return NULL;
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
+
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(info->dev,
+ "Out of range protocol %d\n", prot_id);
+
+ ret = scmi_txrx_setup(info, child, prot_id);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
}
- pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+ return 0;
+}
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (!sdev) {
- dev_err(info->dev, "failed to create %d protocol device\n",
- prot_id);
- mutex_unlock(&scmi_syspower_mtx);
+static int scmi_chan_destroy(int id, void *p, void *idr)
+{
+ struct scmi_chan_info *cinfo = p;
- return NULL;
+ if (cinfo->dev) {
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
+
+ of_node_put(cinfo->dev->of_node);
+ scmi_device_destroy(info->dev, id, sdev->name);
+ cinfo->dev = NULL;
}
- if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
- dev_err(&sdev->dev, "failed to setup transport\n");
- scmi_device_destroy(sdev);
- mutex_unlock(&scmi_syspower_mtx);
+ idr_remove(idr, id);
- return NULL;
- }
+ return 0;
+}
- if (prot_id == SCMI_PROTOCOL_SYSTEM)
- scmi_syspower_registered = true;
+static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
+{
+ /* At first free all channels at the transport layer ... */
+ idr_for_each(idr, info->desc->ops->chan_free, idr);
- mutex_unlock(&scmi_syspower_mtx);
+ /* ...then destroy all underlying devices */
+ idr_for_each(idr, scmi_chan_destroy, idr);
- return sdev;
+ idr_destroy(idr);
}
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static void scmi_cleanup_txrx_channels(struct scmi_info *info)
{
- struct scmi_device *sdev;
-
- sdev = scmi_get_protocol_device(np, info, prot_id, name);
- if (!sdev)
- return;
+ scmi_cleanup_channels(info, &info->tx_idr);
- /* setup handle now as the transport is ready */
- scmi_set_handle(sdev);
+ scmi_cleanup_channels(info, &info->rx_idr);
}
-/**
- * scmi_create_protocol_devices - Create devices for all pending requests for
- * this SCMI instance.
- *
- * @np: The device node describing the protocol
- * @info: The SCMI instance descriptor
- * @prot_id: The protocol ID
- *
- * All devices previously requested for this instance (if any) are found and
- * created by scanning the proper @&scmi_requested_devices entry.
- */
-static void scmi_create_protocol_devices(struct device_node *np,
- struct scmi_info *info, int prot_id)
+static int scmi_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct list_head *phead;
+ struct scmi_info *info = bus_nb_to_scmi_info(nb);
+ struct scmi_device *sdev = to_scmi_dev(data);
- mutex_lock(&scmi_requested_devices_mtx);
- phead = idr_find(&scmi_requested_devices, prot_id);
- if (phead) {
- struct scmi_requested_dev *rdev;
+ /* Skip transport devices and devices of different SCMI instances */
+ if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
+ sdev->dev.parent != info->dev)
+ return NOTIFY_DONE;
- list_for_each_entry(rdev, phead, node)
- scmi_create_protocol_device(np, info, prot_id,
- rdev->id_table->name);
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ /* setup handle now as the transport is ready */
+ scmi_set_handle(sdev);
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ scmi_handle_put(sdev->handle);
+ sdev->handle = NULL;
+ break;
+ default:
+ return NOTIFY_DONE;
}
- mutex_unlock(&scmi_requested_devices_mtx);
+
+ dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
+ sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
+ "about to be BOUND." : "UNBOUND.");
+
+ return NOTIFY_OK;
}
-/**
- * scmi_protocol_device_request - Helper to request a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be created.
- *
- * This helper let an SCMI driver request specific devices identified by the
- * @id_table to be created for each active SCMI instance.
- *
- * The requested device name MUST NOT be already existent for any protocol;
- * at first the freshly requested @id_table is annotated in the IDR table
- * @scmi_requested_devices, then a matching device is created for each already
- * active SCMI instance. (if any)
- *
- * This way the requested device is created straight-away for all the already
- * initialized(probed) SCMI instances (handles) and it remains also annotated
- * as pending creation if the requesting SCMI driver was loaded before some
- * SCMI instance and related transports were available: when such late instance
- * is probed, its probe will take care to scan the list of pending requested
- * devices and create those on its own (see @scmi_create_protocol_devices and
- * its enclosing loop)
- *
- * Return: 0 on Success
- */
-int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+static int scmi_device_request_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- int ret = 0;
- unsigned int id = 0;
- struct list_head *head, *phead = NULL;
- struct scmi_requested_dev *rdev;
- struct scmi_info *info;
+ struct device_node *np;
+ struct scmi_device_id *id_table = data;
+ struct scmi_info *info = req_nb_to_scmi_info(nb);
- pr_debug("Requesting SCMI device (%s) for protocol %x\n",
- id_table->name, id_table->protocol_id);
+ np = idr_find(&info->active_protocols, id_table->protocol_id);
+ if (!np)
+ return NOTIFY_DONE;
- /*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
- */
- mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
- list_for_each_entry(rdev, head, node) {
- if (!strcmp(rdev->id_table->name, id_table->name)) {
- pr_err("Ignoring duplicate request [%d] %s\n",
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- ret = -EINVAL;
- goto out;
- }
- }
- }
+ dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
+ action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
+ id_table->name, id_table->protocol_id);
- /*
- * No duplicate found for requested id_table, so let's create a new
- * requested device entry for this new valid request.
- */
- rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- ret = -ENOMEM;
- goto out;
+ switch (action) {
+ case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
+ scmi_create_protocol_devices(np, info, id_table->protocol_id,
+ id_table->name);
+ break;
+ case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
+ scmi_destroy_protocol_devices(info, id_table->protocol_id,
+ id_table->name);
+ break;
+ default:
+ return NOTIFY_DONE;
}
- rdev->id_table = id_table;
- /*
- * Append the new requested device table descriptor to the head of the
- * related protocol list, eventually creating such head if not already
- * there.
- */
- if (!phead) {
- phead = kzalloc(sizeof(*phead), GFP_KERNEL);
- if (!phead) {
- kfree(rdev);
- ret = -ENOMEM;
- goto out;
- }
- INIT_LIST_HEAD(phead);
-
- ret = idr_alloc(&scmi_requested_devices, (void *)phead,
- id_table->protocol_id,
- id_table->protocol_id + 1, GFP_KERNEL);
- if (ret != id_table->protocol_id) {
- pr_err("Failed to save SCMI device - ret:%d\n", ret);
- kfree(rdev);
- kfree(phead);
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- }
- list_add(&rdev->node, phead);
+ return NOTIFY_OK;
+}
- /*
- * Now effectively create and initialize the requested device for every
- * already initialized SCMI instance which has registered the requested
- * protocol as a valid active one: i.e. defined in DT and supported by
- * current platform FW.
- */
- mutex_lock(&scmi_list_mutex);
- list_for_each_entry(info, &scmi_list, node) {
- struct device_node *child;
-
- child = idr_find(&info->active_protocols,
- id_table->protocol_id);
- if (child) {
- struct scmi_device *sdev;
-
- sdev = scmi_get_protocol_device(child, info,
- id_table->protocol_id,
- id_table->name);
- if (sdev) {
- /* Set handle if not already set: device existed */
- if (!sdev->handle)
- sdev->handle =
- scmi_handle_get_from_info_unlocked(info);
- /* Relink consumer and suppliers */
- if (sdev->handle)
- scmi_device_link_add(&sdev->dev,
- sdev->handle->dev);
- }
- } else {
- dev_err(info->dev,
- "Failed. SCMI protocol %d not active.\n",
- id_table->protocol_id);
- }
- }
- mutex_unlock(&scmi_list_mutex);
+static void scmi_debugfs_common_cleanup(void *d)
+{
+ struct scmi_debug_info *dbg = d;
-out:
- mutex_unlock(&scmi_requested_devices_mtx);
+ if (!dbg)
+ return;
- return ret;
+ debugfs_remove_recursive(dbg->top_dentry);
+ kfree(dbg->name);
+ kfree(dbg->type);
}
-/**
- * scmi_protocol_device_unrequest - Helper to unrequest a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be unrequested.
- *
- * An helper to let an SCMI driver release its request about devices; note that
- * devices are created and initialized once the first SCMI driver request them
- * but they destroyed only on SCMI core unloading/unbinding.
- *
- * The current SCMI transport layer uses such devices as internal references and
- * as such they could be shared as same transport between multiple drivers so
- * that cannot be safely destroyed till the whole SCMI stack is removed.
- * (unless adding further burden of refcounting.)
- */
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
{
- struct list_head *phead;
+ char top_dir[16];
+ struct dentry *trans, *top_dentry;
+ struct scmi_debug_info *dbg;
+ const char *c_ptr = NULL;
- pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
- id_table->name, id_table->protocol_id);
+ dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return NULL;
- mutex_lock(&scmi_requested_devices_mtx);
- phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
- if (phead) {
- struct scmi_requested_dev *victim, *tmp;
+ dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
+ if (!dbg->name) {
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
- list_for_each_entry_safe(victim, tmp, phead, node) {
- if (!strcmp(victim->id_table->name, id_table->name)) {
- list_del(&victim->node);
- kfree(victim);
- break;
- }
- }
+ of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
+ dbg->type = kstrdup(c_ptr, GFP_KERNEL);
+ if (!dbg->type) {
+ kfree(dbg->name);
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
- if (list_empty(phead)) {
- idr_remove(&scmi_requested_devices,
- id_table->protocol_id);
- kfree(phead);
- }
+ snprintf(top_dir, 16, "%d", info->id);
+ top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
+ trans = debugfs_create_dir("transport", top_dentry);
+
+ dbg->is_atomic = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
+
+ debugfs_create_str("instance_name", 0400, top_dentry,
+ (char **)&dbg->name);
+
+ debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
+ &info->atomic_threshold);
+
+ debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
+
+ debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
+
+ debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
+ (u32 *)&info->desc->max_rx_timeout_ms);
+
+ debugfs_create_u32("max_msg_size", 0400, trans,
+ (u32 *)&info->desc->max_msg_size);
+
+ debugfs_create_u32("tx_max_msg", 0400, trans,
+ (u32 *)&info->tx_minfo.max_msg);
+
+ debugfs_create_u32("rx_max_msg", 0400, trans,
+ (u32 *)&info->rx_minfo.max_msg);
+
+ dbg->top_dentry = top_dentry;
+
+ if (devm_add_action_or_reset(info->dev,
+ scmi_debugfs_common_cleanup, dbg)) {
+ scmi_debugfs_common_cleanup(dbg);
+ return NULL;
}
- mutex_unlock(&scmi_requested_devices_mtx);
+
+ return dbg;
}
-static int scmi_cleanup_txrx_channels(struct scmi_info *info)
+static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
{
- int ret;
- struct idr *idr = &info->tx_idr;
+ int id, num_chans = 0, ret = 0;
+ struct scmi_chan_info *cinfo;
+ u8 channels[SCMI_MAX_CHANNELS] = {};
+ DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
+
+ if (!info->dbg)
+ return -EINVAL;
+
+ /* Enumerate all channels to collect their ids */
+ idr_for_each_entry(&info->tx_idr, cinfo, id) {
+ /*
+ * Cannot happen, but be defensive.
+ * Zero as num_chans is ok, warn and carry on.
+ */
+ if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
+ dev_warn(info->dev,
+ "SCMI RAW - Error enumerating channels\n");
+ break;
+ }
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->tx_idr);
+ if (!test_bit(cinfo->id, protos)) {
+ channels[num_chans++] = cinfo->id;
+ set_bit(cinfo->id, protos);
+ }
+ }
- idr = &info->rx_idr;
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->rx_idr);
+ info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
+ info->id, channels, num_chans,
+ info->desc, info->tx_minfo.max_msg);
+ if (IS_ERR(info->raw)) {
+ dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
+ ret = PTR_ERR(info->raw);
+ info->raw = NULL;
+ }
return ret;
}
@@ -2377,12 +2668,19 @@ static int scmi_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
+ info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
+ if (info->id < 0)
+ return info->id;
+
info->dev = dev;
info->desc = desc;
+ info->bus_nb.notifier_call = scmi_bus_notifier;
+ info->dev_req_nb.notifier_call = scmi_device_request_notifier;
INIT_LIST_HEAD(&info->node);
idr_init(&info->protocols);
mutex_init(&info->protocols_mtx);
idr_init(&info->active_protocols);
+ mutex_init(&info->devreq_mtx);
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
@@ -2406,21 +2704,55 @@ static int scmi_probe(struct platform_device *pdev)
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
if (ret)
- return ret;
+ goto clear_ida;
}
- ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ /* Setup all channels described in the DT at first */
+ ret = scmi_channels_setup(info);
if (ret)
- return ret;
+ goto clear_ida;
- ret = scmi_xfer_info_init(info);
+ ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
if (ret)
goto clear_txrx_setup;
+ ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ if (ret)
+ goto clear_bus_notifier;
+
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ if (scmi_top_dentry) {
+ info->dbg = scmi_debugfs_common_setup(info);
+ if (!info->dbg)
+ dev_warn(dev, "Failed to setup SCMI debugfs.\n");
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ bool coex =
+ IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
+
+ ret = scmi_debugfs_raw_mode_setup(info);
+ if (!coex) {
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ /* Bail out anyway when coex disabled */
+ return ret;
+ }
+
+ /* Coex enabled, carry on in any case. */
+ dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
+ }
+ }
+
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
- if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
+ if (info->desc->atomic_enabled &&
+ !is_transport_polling_capable(info->desc))
dev_err(dev,
"Transport is not polling capable. Atomic mode not supported.\n");
@@ -2467,29 +2799,36 @@ static int scmi_probe(struct platform_device *pdev)
}
of_node_get(child);
- scmi_create_protocol_devices(child, info, prot_id);
+ scmi_create_protocol_devices(child, info, prot_id, NULL);
}
return 0;
notification_exit:
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
scmi_notification_exit(&info->handle);
+clear_dev_req_notifier:
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+clear_bus_notifier:
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
clear_txrx_setup:
scmi_cleanup_txrx_channels(info);
+clear_ida:
+ ida_free(&scmi_id, info->id);
return ret;
}
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
-{
- idr_remove(idr, id);
-}
-
static int scmi_remove(struct platform_device *pdev)
{
- int ret, id;
+ int id;
struct scmi_info *info = platform_get_drvdata(pdev);
struct device_node *child;
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
+
mutex_lock(&scmi_list_mutex);
if (info->users)
dev_warn(&pdev->dev,
@@ -2507,10 +2846,14 @@ static int scmi_remove(struct platform_device *pdev)
of_node_put(child);
idr_destroy(&info->active_protocols);
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
+
/* Safe to free channels since no more users */
- ret = scmi_cleanup_txrx_channels(info);
- if (ret)
- dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n");
+ scmi_cleanup_txrx_channels(info);
+
+ ida_free(&scmi_id, info->id);
return 0;
}
@@ -2639,6 +2982,19 @@ static void __exit scmi_transports_exit(void)
__scmi_transports_setup(false);
}
+static struct dentry *scmi_debugfs_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("scmi", NULL);
+ if (IS_ERR(d)) {
+ pr_err("Could NOT create SCMI top dentry.\n");
+ return NULL;
+ }
+
+ return d;
+}
+
static int __init scmi_driver_init(void)
{
int ret;
@@ -2647,13 +3003,14 @@ static int __init scmi_driver_init(void)
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
- scmi_bus_init();
-
/* Initialize any compiled-in transport which provided an init/exit */
ret = scmi_transports_init();
if (ret)
return ret;
+ if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
+ scmi_top_dentry = scmi_debugfs_init();
+
scmi_base_register();
scmi_clock_register();
@@ -2667,7 +3024,7 @@ static int __init scmi_driver_init(void)
return platform_driver_register(&scmi_driver);
}
-subsys_initcall(scmi_driver_init);
+module_init(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
@@ -2682,11 +3039,11 @@ static void __exit scmi_driver_exit(void)
scmi_system_unregister();
scmi_powercap_unregister();
- scmi_bus_exit();
-
scmi_transports_exit();
platform_driver_unregister(&scmi_driver);
+
+ debugfs_remove_recursive(scmi_top_dentry);
}
module_exit(scmi_driver_exit);
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 1e40cb035044..0d9c9538b7f4 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -46,9 +46,9 @@ static void rx_callback(struct mbox_client *cl, void *m)
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
-static bool mailbox_chan_available(struct device *dev, int idx)
+static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
- return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+ return !of_parse_phandle_with_args(of_node, "mboxes",
"#mbox-cells", idx, NULL);
}
@@ -120,8 +120,6 @@ static int mailbox_chan_free(int id, void *p, void *data)
smbox->cinfo = NULL;
}
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 2a7aeab40e54..929720387102 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -328,11 +328,11 @@ static int scmi_optee_link_supplier(struct device *dev)
return 0;
}
-static bool scmi_optee_chan_available(struct device *dev, int idx)
+static bool scmi_optee_chan_available(struct device_node *of_node, int idx)
{
u32 channel_id;
- return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id",
+ return !of_property_read_u32_index(of_node, "linaro,optee-channel-id",
idx, &channel_id);
}
@@ -481,8 +481,6 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
cinfo->transport_info = NULL;
channel->cinfo = NULL;
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 2f3bf691db7c..78e1a01eb656 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -115,6 +115,7 @@ struct scmi_msg_hdr {
* - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
* - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
* (Missing synchronous response is assumed OK and ignored)
+ * @flags: Optional flags associated to this xfer.
* @lock: A spinlock to protect state and busy fields.
* @priv: A pointer for transport private usage.
*/
@@ -135,6 +136,12 @@ struct scmi_xfer {
#define SCMI_XFER_RESP_OK 1
#define SCMI_XFER_DRESP_OK 2
int state;
+#define SCMI_XFER_FLAG_IS_RAW BIT(0)
+#define SCMI_XFER_IS_RAW(x) ((x)->flags & SCMI_XFER_FLAG_IS_RAW)
+#define SCMI_XFER_FLAG_CHAN_SET BIT(1)
+#define SCMI_XFER_IS_CHAN_SET(x) \
+ ((x)->flags & SCMI_XFER_FLAG_CHAN_SET)
+ int flags;
/* A lock to protect state and busy fields */
spinlock_t lock;
void *priv;
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
new file mode 100644
index 000000000000..d40df099fd51
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -0,0 +1,1443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Raw mode support
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * When enabled the SCMI Raw mode support exposes a userspace API which allows
+ * to send and receive SCMI commands, replies and notifications from a user
+ * application through injection and snooping of bare SCMI messages in binary
+ * little-endian format.
+ *
+ * Such injected SCMI transactions will then be routed through the SCMI core
+ * stack towards the SCMI backend server using whatever SCMI transport is
+ * currently configured on the system under test.
+ *
+ * It is meant to help in running any sort of SCMI backend server testing, no
+ * matter where the server is placed, as long as it is normally reachable via
+ * the transport configured on the system.
+ *
+ * It is activated by a Kernel configuration option since it is NOT meant to
+ * be used in production but only during development and in CI deployments.
+ *
+ * In order to avoid possible interferences between the SCMI Raw transactions
+ * originated from a test-suite and the normal operations of the SCMI drivers,
+ * when Raw mode is enabled, by default, all the regular SCMI drivers are
+ * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
+ * latter case the regular SCMI stack drivers will be loaded as usual and it is
+ * up to the user of this interface to take care of manually inhibiting the
+ * regular SCMI drivers in order to avoid interferences during the test runs.
+ *
+ * The exposed API is as follows.
+ *
+ * All SCMI Raw entries are rooted under a common top /raw debugfs top directory
+ * which in turn is rooted under the corresponding underlying SCMI instance.
+ *
+ * /sys/kernel/debug/scmi/
+ * `-- 0
+ * |-- atomic_threshold_us
+ * |-- instance_name
+ * |-- raw
+ * | |-- channels
+ * | | |-- 0x10
+ * | | | |-- message
+ * | | | `-- message_async
+ * | | `-- 0x13
+ * | | |-- message
+ * | | `-- message_async
+ * | |-- errors
+ * | |-- message
+ * | |-- message_async
+ * | |-- notification
+ * | `-- reset
+ * `-- transport
+ * |-- is_atomic
+ * |-- max_msg_size
+ * |-- max_rx_timeout_ms
+ * |-- rx_max_msg
+ * |-- tx_max_msg
+ * `-- type
+ *
+ * where:
+ *
+ * - errors: used to read back timed-out and unexpected replies
+ * - message*: used to send sync/async commands and read back immediate and
+ * delayed responses (if any)
+ * - notification: used to read any notification being emitted by the system
+ * (if previously enabled by the user app)
+ * - reset: used to flush the queues of messages (of any kind) still pending
+ * to be read; this is useful at test-suite start/stop to get
+ * rid of any unread messages from the previous run.
+ *
+ * with the per-channel entries rooted at /channels being present only on a
+ * system where multiple transport channels have been configured.
+ *
+ * Such per-channel entries can be used to explicitly choose a specific channel
+ * for SCMI bare message injection, in contrast with the general entries above
+ * where, instead, the selection of the proper channel to use is automatically
+ * performed based on the protocol embedded in the injected message and on how the
+ * transport is configured on the system.
+ *
+ * Note that other common general entries are available under transport/ to let
+ * the user applications properly make up their expectations in terms of
+ * timeouts and message characteristics.
+ *
+ * Each write to the message* entries causes one command request to be built
+ * and sent while the replies or delayed responses are read back from those same
+ * entries one message at a time (receiving an EOF at each message boundary).
+ *
+ * The user application running the test is in charge of handling timeouts
+ * on replies and properly choosing SCMI sequence numbers for the outgoing
+ * requests (using the same sequence number is supported but discouraged).
+ *
+ * Injection of multiple in-flight requests is supported as long as the user
+ * application uses properly distinct sequence numbers for concurrent requests
+ * and takes care to properly manage all the related issues about concurrency
+ * and command/reply pairing. Keep in mind that, anyway, the real level of
+ * parallelism attainable in such a scenario depends on the characteristics
+ * of the underlying transport being used.
+ *
+ * Since the SCMI core regular stack is partially used to deliver and collect
+ * the messages, late replies arriving after timeouts and any other sort of
+ * unexpected message can be identified by the SCMI core as usual and they will
+ * be reported as messages under "errors" for later analysis.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "common.h"
+
+#include "raw_mode.h"
+
+#include <trace/events/scmi.h>
+
+#define SCMI_XFER_RAW_MAX_RETRIES 10
+
+/**
+ * struct scmi_raw_queue - Generic Raw queue descriptor
+ *
+ * @free_bufs: A freelist head used to keep unused raw buffers
+ * @free_bufs_lock: Spinlock used to protect access to @free_bufs
+ * @msg_q: A listhead to a queue of snooped messages waiting to be read out
+ * @msg_q_lock: Spinlock used to protect access to @msg_q
+ * @wq: A waitqueue used to wait and poll on related @msg_q
+ */
+struct scmi_raw_queue {
+ struct list_head free_bufs;
+ /* Protect the free_bufs list */
+ spinlock_t free_bufs_lock;
+ struct list_head msg_q;
+ /* Protect the msg_q list */
+ spinlock_t msg_q_lock;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
+ *
+ * @id: Sequential Raw instance ID.
+ * @handle: Pointer to SCMI entity handle to use
+ * @desc: Pointer to the transport descriptor to use
+ * @tx_max_msg: Maximum number of concurrent TX in-flight messages
+ * @q: An array of Raw queue descriptors
+ * @chans_q: An XArray mapping optional additional per-channel queues
+ * @free_waiters: Head of freelist for unused waiters
+ * @free_mtx: A mutex to protect the waiters freelist
+ * @active_waiters: Head of list for currently active and used waiters
+ * @active_mtx: A mutex to protect the active waiters list
+ * @waiters_work: A work descriptor to be used with the workqueue machinery
+ * @wait_wq: A workqueue reference to the created workqueue
+ * @dentry: Top debugfs root dentry for SCMI Raw
+ * @gid: A group ID used for devres accounting
+ *
+ * Note that this descriptor is passed back to the core after SCMI Raw is
+ * initialized, as an opaque handle to be used by subsequent SCMI Raw call hooks.
+ *
+ */
+struct scmi_raw_mode_info {
+ unsigned int id;
+ const struct scmi_handle *handle;
+ const struct scmi_desc *desc;
+ int tx_max_msg;
+ struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
+ struct xarray chans_q;
+ struct list_head free_waiters;
+ /* Protect free_waiters list */
+ struct mutex free_mtx;
+ struct list_head active_waiters;
+ /* Protect active_waiters list */
+ struct mutex active_mtx;
+ struct work_struct waiters_work;
+ struct workqueue_struct *wait_wq;
+ struct dentry *dentry;
+ void *gid;
+};
+
+/**
+ * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
+ *
+ * @start_jiffies: The timestamp in jiffies of when this structure was queued.
+ * @cinfo: A reference to the channel to use for this transaction
+ * @xfer: A reference to the xfer to be waited for
+ * @async_response: A completion to be, optionally, used for async waits: it
+ * will be setup by @scmi_do_xfer_raw_start, if needed, to be
+ * will be set up by @scmi_do_xfer_raw_start, if needed, to be
+ * @node: A list node.
+ */
+struct scmi_xfer_raw_waiter {
+ unsigned long start_jiffies;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer *xfer;
+ struct completion async_response;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_raw_buffer - Structure to hold a full SCMI message
+ *
+ * @max_len: The maximum allowed message size (header included) that can be
+ * stored into @msg
+ * @msg: A message buffer used to collect a full message grabbed from an xfer.
+ * @node: A list node.
+ */
+struct scmi_raw_buffer {
+ size_t max_len;
+ struct scmi_msg msg;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
+ * layer
+ *
+ * @chan_id: The preferred channel to use: if zero the channel is automatically
+ * selected based on protocol.
+ * @raw: A reference to the Raw instance.
+ * @tx: A message buffer used to collect TX message on write.
+ * @tx_size: The effective size of the TX message.
+ * @tx_req_size: The final expected size of the complete TX message.
+ * @rx: A message buffer to collect RX message on read.
+ * @rx_size: The effective size of the RX message.
+ */
+struct scmi_dbg_raw_data {
+ u8 chan_id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_msg tx;
+ size_t tx_size;
+ size_t tx_req_size;
+ struct scmi_msg rx;
+ size_t rx_size;
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
+ unsigned int chan_id)
+{
+ if (!chan_id)
+ return raw->q[idx];
+
+ return xa_load(&raw->chans_q, chan_id);
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb = NULL;
+ struct list_head *head = &q->free_bufs;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ if (!list_empty(head)) {
+ rb = list_first_entry(head, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ list_add_tail(&rb->node, &q->free_bufs);
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+}
+
+static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ list_add_tail(&rb->node, &q->msg_q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ wake_up_interruptible(&q->wq);
+}
+
+static struct scmi_raw_buffer*
+scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb = NULL;
+
+ if (!list_empty(&q->msg_q)) {
+ rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+
+ return rb;
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb;
+
+ do {
+ rb = scmi_raw_buffer_dequeue(q);
+ if (rb)
+ scmi_raw_buffer_put(q, rb);
+ } while (rb);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
+ struct scmi_chan_info *cinfo, bool async)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->free_mtx);
+ if (!list_empty(&raw->free_waiters)) {
+ rw = list_first_entry(&raw->free_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+
+ if (async) {
+ reinit_completion(&rw->async_response);
+ xfer->async_done = &rw->async_response;
+ }
+
+ rw->cinfo = cinfo;
+ rw->xfer = xfer;
+ }
+ mutex_unlock(&raw->free_mtx);
+
+ return rw;
+}
+
+static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ if (rw->xfer) {
+ rw->xfer->async_done = NULL;
+ rw->xfer = NULL;
+ }
+
+ mutex_lock(&raw->free_mtx);
+ list_add_tail(&rw->node, &raw->free_waiters);
+ mutex_unlock(&raw->free_mtx);
+}
+
+static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ /* A timestamp for the deferred worker to know how much this has aged */
+ rw->start_jiffies = jiffies;
+
+ trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
+ rw->xfer->hdr.protocol_id,
+ rw->xfer->hdr.seq,
+ raw->desc->max_rx_timeout_ms,
+ rw->xfer->hdr.poll_completion);
+
+ mutex_lock(&raw->active_mtx);
+ list_add_tail(&rw->node, &raw->active_waiters);
+ mutex_unlock(&raw->active_mtx);
+
+ /* kick waiter work */
+ queue_work(raw->wait_wq, &raw->waiters_work);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->active_mtx);
+ if (!list_empty(&raw->active_waiters)) {
+ rw = list_first_entry(&raw->active_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+ }
+ mutex_unlock(&raw->active_mtx);
+
+ return rw;
+}
+
+/**
+ * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
+ *
+ * @work: A reference to the work.
+ *
+ * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
+ * cannot wait to receive its response (if any) in the context of the injection
+ * routines so as not to leave the userspace write syscall, which delivered the
+ * SCMI message to send, pending until a reply is eventually received.
+ * Userspace should and will poll/wait instead on the read syscalls which will
+ * be in charge of reading a received reply (if any).
+ *
+ * Even though reply messages are collected and reported into the SCMI Raw layer
+ * on the RX path, we still have to properly wait for their completion as
+ * usual (and async_completion too if needed) in order to properly release the
+ * xfer structure at the end: to do this out of the context of the write/send
+ * these waiting jobs are delegated to this deferred worker.
+ *
+ * Any sent xfer, to be waited for, is timestamped and queued for later
+ * consumption by this worker: queue aging is accounted for while choosing a
+ * timeout for the completion, BUT we do not really care here if we end up
+ * accidentally waiting for a bit too long.
+ */
+static void scmi_xfer_raw_worker(struct work_struct *work)
+{
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+ unsigned long max_tmo;
+
+ raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
+ dev = raw->handle->dev;
+ max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);
+
+ do {
+ int ret = 0;
+ unsigned int timeout_ms;
+ unsigned long aging;
+ struct scmi_xfer *xfer;
+ struct scmi_xfer_raw_waiter *rw;
+ struct scmi_chan_info *cinfo;
+
+ rw = scmi_xfer_raw_waiter_dequeue(raw);
+ if (!rw)
+ return;
+
+ cinfo = rw->cinfo;
+ xfer = rw->xfer;
+ /*
+ * Waiters are queued by wait-deadline at the end, so some of
+ * them could have been already expired when processed, BUT we
+ * have to check the completion status anyway just in case a
+ * virtually expired (aged) transaction was indeed completed
+ * fine and we'll have to wait for the asynchronous part (if
+ * any): for this reason a 1 ms timeout is used for already
+ * expired/aged xfers.
+ */
+ aging = jiffies - rw->start_jiffies;
+ timeout_ms = max_tmo > aging ?
+ jiffies_to_msecs(max_tmo - aging) : 1;
+
+ ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
+ timeout_ms);
+ if (!ret && xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+
+ if (raw->desc->ops->mark_txdone)
+ raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
+
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+
+ /* Wait also for an async delayed response if needed */
+ if (!ret && xfer->async_done) {
+ unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+
+ if (!wait_for_completion_timeout(xfer->async_done, tmo))
+ dev_err(dev,
+ "timed out in RAW delayed resp - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+ }
+
+ /* Release waiter and xfer */
+ scmi_xfer_raw_put(raw->handle, xfer);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ } while (1);
+}
+
+static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
+{
+ int i;
+
+ dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
+
+ for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
+ scmi_raw_buffer_queue_flush(raw->q[i]);
+}
+
+/**
+ * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
+ * bare SCMI message.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ * header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @p: A pointer to return the initialized Raw xfer.
+ *
+ * After an xfer is picked from the TX pool and filled in with the message
+ * content, the xfer is registered as pending with the core in the usual way
+ * using the original sequence number provided by the user with the message.
+ *
+ * Note that, in case the testing user application is NOT using distinct
+ * sequence numbers between successive SCMI messages, such registration could
+ * fail temporarily if the previous message, using the same sequence number,
+ * had still not been released; in such a case we just wait and retry.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
+ size_t len, struct scmi_xfer **p)
+{
+ u32 msg_hdr;
+ size_t tx_size;
+ struct scmi_xfer *xfer;
+ int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
+ struct device *dev = raw->handle->dev;
+
+ if (!buf || len < sizeof(u32))
+ return -EINVAL;
+
+ tx_size = len - sizeof(u32);
+ /* Ensure we have sane transfer sizes */
+ if (tx_size > raw->desc->max_msg_size)
+ return -ERANGE;
+
+ xfer = scmi_xfer_raw_get(raw->handle);
+ if (IS_ERR(xfer)) {
+ dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
+ return PTR_ERR(xfer);
+ }
+
+ /* Build xfer from the provided SCMI bare LE message */
+ msg_hdr = le32_to_cpu(*((__le32 *)buf));
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
+ /* Polling not supported */
+ xfer->hdr.poll_completion = false;
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->tx.len = tx_size;
+ xfer->rx.len = raw->desc->max_msg_size;
+ /* Clear the whole TX buffer */
+ memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
+ if (xfer->tx.len)
+ memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
+ *p = xfer;
+
+ /*
+ * In flight registration can temporarily fail in case of Raw messages
+ * if the user injects messages without using monotonically increasing
+ * sequence numbers since, in Raw mode, the xfer (and the token) is
+ * finally released later by a deferred worker. Just retry for a while.
+ */
+ do {
+ ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
+ if (ret) {
+ dev_dbg(dev,
+ "...retrying[%d] inflight registration\n",
+ retry);
+ msleep(raw->desc->max_rx_timeout_ms /
+ SCMI_XFER_RAW_MAX_RETRIES);
+ }
+ } while (ret && --retry);
+
+ if (ret) {
+ dev_warn(dev,
+ "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
+ xfer->hdr.seq, msg_hdr);
+ scmi_xfer_raw_put(raw->handle, xfer);
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
+ *
+ * @raw: A reference to the Raw instance.
+ * @xfer: The xfer to send
+ * @chan_id: The channel ID to use, if zero the channel is automatically
+ * selected based on the protocol used.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * This function sends a previously built raw xfer using an appropriate channel
+ * and queues the related waiting work.
+ *
+ * Note that we need to know explicitly if the required command is meant to be
+ * asynchronous in kind since we have to properly set up the waiter.
+ * (and deducing this from the payload is weak and does not scale given there is
+ * NOT a common header-flag stating if the command is asynchronous or not)
+ *
+ * Return: 0 on Success
+ */
+static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer *xfer, u8 chan_id,
+ bool async)
+{
+ int ret;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ if (!chan_id)
+ chan_id = xfer->hdr.protocol_id;
+ else
+ xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;
+
+ cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
+ if (IS_ERR(cinfo))
+ return PTR_ERR(cinfo);
+
+ rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
+ if (!rw) {
+ dev_warn(dev, "RAW - Cannot get a free waiter !\n");
+ return -ENOMEM;
+ }
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, raw->desc))
+ xfer->hdr.poll_completion = true;
+
+ reinit_completion(&xfer->done);
+ /* Make sure xfer state update is visible before sending */
+ smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);
+
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
+ ret = raw->desc->ops->send_message(rw->cinfo, xfer);
+ if (ret) {
+ dev_err(dev, "Failed to send RAW message %d\n", ret);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ return ret;
+ }
+
+ trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "cmnd", xfer->hdr.seq,
+ xfer->hdr.status,
+ xfer->tx.buf, xfer->tx.len);
+
+ scmi_xfer_raw_waiter_enqueue(raw, rw);
+
+ return ret;
+}
+
+/**
+ * scmi_raw_message_send - A helper to build and send an SCMI command using
+ * the provided SCMI bare message buffer
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ * header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @chan_id: The channel ID to use.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, u8 chan_id, bool async)
+{
+ int ret;
+ struct scmi_xfer *xfer;
+
+ ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
+ if (ret)
+ scmi_xfer_raw_put(raw->handle, xfer);
+
+ return ret;
+}
+
+static struct scmi_raw_buffer *
+scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ while (list_empty(&q->msg_q)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ if (o_nonblock)
+ return ERR_PTR(-EAGAIN);
+
+ if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
+ return ERR_PTR(-ERESTARTSYS);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ }
+
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+/**
+ * scmi_raw_message_receive - A helper to dequeue and report the next
+ * available enqueued raw message payload that has been collected.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer to get hold of the whole SCMI message received and represented
+ * in little-endian binary format.
+ * @len: Length of @buf.
+ * @size: The effective size of the message copied into @buf
+ * @idx: The index of the queue to pick the next queued message from.
+ * @chan_id: The channel ID to use.
+ * @o_nonblock: A flag to request a non-blocking message dequeue.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, size_t *size,
+ unsigned int idx, unsigned int chan_id,
+ bool o_nonblock)
+{
+ int ret = 0;
+ struct scmi_raw_buffer *rb;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_select(raw, idx, chan_id);
+ if (!q)
+ return -ENODEV;
+
+ rb = scmi_raw_message_dequeue(q, o_nonblock);
+ if (IS_ERR(rb)) {
+ dev_dbg(raw->handle->dev, "RAW - No message available!\n");
+ return PTR_ERR(rb);
+ }
+
+ if (rb->msg.len <= len) {
+ memcpy(buf, rb->msg.buf, rb->msg.len);
+ *size = rb->msg.len;
+ } else {
+ ret = -ENOSPC;
+ }
+
+ scmi_raw_buffer_put(q, rb);
+
+ return ret;
+}
+
+/* SCMI Raw debugfs helpers */
+
+static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos,
+ unsigned int idx)
+{
+ ssize_t cnt;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (!rd->rx_size) {
+ int ret;
+
+ ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
+ &rd->rx_size, idx, rd->chan_id,
+ filp->f_flags & O_NONBLOCK);
+ if (ret) {
+ rd->rx_size = 0;
+ return ret;
+ }
+
+ /* Reset any previous filepos change, including writes */
+ *ppos = 0;
+ } else if (*ppos == rd->rx_size) {
+ /* Return EOF once all the message has been read-out */
+ rd->rx_size = 0;
+ return 0;
+ }
+
+ cnt = simple_read_from_buffer(buf, count, ppos,
+ rd->rx.buf, rd->rx_size);
+
+ return cnt;
+}
+
+static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos,
+ bool async)
+{
+ int ret;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (count > rd->tx.len - rd->tx_size)
+ return -ENOSPC;
+
+ /* On first write attempt @count carries the total full message size. */
+ if (!rd->tx_size)
+ rd->tx_req_size = count;
+
+ /*
+	 * Gather a full message, possibly across multiple interrupted writes,
+ * before sending it with a single RAW xfer.
+ */
+ if (rd->tx_size < rd->tx_req_size) {
+ size_t cnt;
+
+ cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
+ buf, count);
+ rd->tx_size += cnt;
+ if (cnt < count)
+ return cnt;
+ }
+
+ ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
+ rd->chan_id, async);
+
+ /* Reset ppos for next message ... */
+ rd->tx_size = 0;
+ *ppos = 0;
+
+ return ret ?: count;
+}
+
+static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
+ struct poll_table_struct *wait,
+ unsigned int idx)
+{
+ unsigned long flags;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+ struct scmi_raw_queue *q;
+ __poll_t mask = 0;
+
+ q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
+ if (!q)
+ return mask;
+
+ poll_wait(filp, &q->wq, wait);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ if (!list_empty(&q->msg_q))
+ mask = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return mask;
+}
+
+static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_REPLY_QUEUE);
+}
+
+static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+}
+
+static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
+}
+
+static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
+{
+ u8 id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_dbg_raw_data *rd;
+ const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
+
+ if (!inode->i_private)
+ return -ENODEV;
+
+ raw = inode->i_private;
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
+ if (!rd->rx.buf) {
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
+ if (!rd->tx.buf) {
+ kfree(rd->rx.buf);
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ /* Grab channel ID from debugfs entry naming if any */
+ if (!kstrtou8(id_str, 16, &id))
+ rd->chan_id = id;
+
+ rd->raw = raw;
+ filp->private_data = rd;
+
+ return 0;
+}
+
+static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ kfree(rd->rx.buf);
+ kfree(rd->tx.buf);
+ kfree(rd);
+
+ return 0;
+}
+
+static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ scmi_xfer_raw_reset(rd->raw);
+
+ return count;
+}
+
+static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .write = scmi_dbg_raw_mode_reset_write,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations scmi_dbg_raw_mode_message_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_NOTIF_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_notif_read,
+ .poll = scmi_test_dbg_raw_mode_notif_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_ERRS_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_errors_read,
+ .poll = scmi_test_dbg_raw_mode_errors_poll,
+ .owner = THIS_MODULE,
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_raw_buffer *rb;
+ struct device *dev = raw->handle->dev;
+ struct scmi_raw_queue *q;
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+
+ rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
+ if (!rb)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&q->free_bufs_lock);
+ INIT_LIST_HEAD(&q->free_bufs);
+ for (i = 0; i < raw->tx_max_msg; i++, rb++) {
+ rb->max_len = raw->desc->max_msg_size + sizeof(u32);
+ rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
+ if (!rb->msg.buf)
+ return ERR_PTR(-ENOMEM);
+ scmi_raw_buffer_put(q, rb);
+ }
+
+ spin_lock_init(&q->msg_q_lock);
+ INIT_LIST_HEAD(&q->msg_q);
+ init_waitqueue_head(&q->wq);
+
+ return q;
+}
+
+static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
+ if (!rw)
+ return -ENOMEM;
+
+ raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
+ WQ_UNBOUND | WQ_FREEZABLE |
+ WQ_HIGHPRI, WQ_SYSFS, raw->id);
+ if (!raw->wait_wq)
+ return -ENOMEM;
+
+ mutex_init(&raw->free_mtx);
+ INIT_LIST_HEAD(&raw->free_waiters);
+ mutex_init(&raw->active_mtx);
+ INIT_LIST_HEAD(&raw->active_waiters);
+
+ for (i = 0; i < raw->tx_max_msg; i++, rw++) {
+ init_completion(&rw->async_response);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ }
+ INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);
+
+ return 0;
+}
+
+static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ u8 *channels, int num_chans)
+{
+ int ret, idx;
+ void *gid;
+ struct device *dev = raw->handle->dev;
+
+ gid = devres_open_group(dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
+ raw->q[idx] = scmi_raw_queue_init(raw);
+ if (IS_ERR(raw->q[idx])) {
+ ret = PTR_ERR(raw->q[idx]);
+ goto err;
+ }
+ }
+
+ xa_init(&raw->chans_q);
+ if (num_chans > 1) {
+ int i;
+
+ for (i = 0; i < num_chans; i++) {
+ void *xret;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_init(raw);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto err_xa;
+ }
+
+ xret = xa_store(&raw->chans_q, channels[i], q,
+ GFP_KERNEL);
+ if (xa_err(xret)) {
+ dev_err(dev,
+ "Fail to allocate Raw queue 0x%02X\n",
+ channels[i]);
+ ret = xa_err(xret);
+ goto err_xa;
+ }
+ }
+ }
+
+ ret = scmi_xfer_raw_worker_init(raw);
+ if (ret)
+ goto err_xa;
+
+ devres_close_group(dev, gid);
+ raw->gid = gid;
+
+ return 0;
+
+err_xa:
+ xa_destroy(&raw->chans_q);
+err:
+ devres_release_group(dev, gid);
+ return ret;
+}
+
+/**
+ * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @top_dentry: A reference to the top Raw debugfs dentry
+ * @instance_id: The ID of the underlying SCMI platform instance represented by
+ * this Raw instance
+ * @channels: The list of the existing channels
+ * @num_chans: The number of entries in @channels
+ * @desc: Reference to the transport operations
+ * @tx_max_msg: Max number of in-flight messages allowed by the transport
+ *
+ * This function prepares the SCMI Raw stack and creates the debugfs API.
+ *
+ * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
+ */
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg)
+{
+ int ret;
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+
+ if (!handle || !desc)
+ return ERR_PTR(-EINVAL);
+
+ dev = handle->dev;
+ raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return ERR_PTR(-ENOMEM);
+
+ raw->handle = handle;
+ raw->desc = desc;
+ raw->tx_max_msg = tx_max_msg;
+ raw->id = instance_id;
+
+ ret = scmi_raw_mode_setup(raw, channels, num_chans);
+ if (ret) {
+ devm_kfree(dev, raw);
+ return ERR_PTR(ret);
+ }
+
+ raw->dentry = debugfs_create_dir("raw", top_dentry);
+
+ debugfs_create_file("reset", 0200, raw->dentry, raw,
+ &scmi_dbg_raw_mode_reset_fops);
+
+ debugfs_create_file("message", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file("notification", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_notification_fops);
+
+ debugfs_create_file("errors", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_errors_fops);
+
+ /*
+ * Expose per-channel entries if multiple channels available.
+ * Just ignore errors while setting up these interfaces since we
+ * already have working core Raw support anyway.
+ */
+ if (num_chans > 1) {
+ int i;
+ struct dentry *top_chans;
+
+ top_chans = debugfs_create_dir("channels", raw->dentry);
+
+ for (i = 0; i < num_chans; i++) {
+ char cdir[8];
+ struct dentry *chd;
+
+ snprintf(cdir, 8, "0x%02X", channels[i]);
+ chd = debugfs_create_dir(cdir, top_chans);
+
+ debugfs_create_file("message", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+ }
+ }
+
+ dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);
+
+ return raw;
+}
+
+/**
+ * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
+ *
+ * @r: An opaque handle to an initialized SCMI Raw instance
+ */
+void scmi_raw_mode_cleanup(void *r)
+{
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ debugfs_remove_recursive(raw->dentry);
+
+ cancel_work_sync(&raw->waiters_work);
+ destroy_workqueue(raw->wait_wq);
+ xa_destroy(&raw->chans_q);
+}
+
+static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
+ struct scmi_xfer *xfer)
+{
+ __le32 *m;
+ size_t msg_size;
+
+ if (!xfer || !msg || !msg_len)
+ return -EINVAL;
+
+ /* Account for hdr ... */
+ msg_size = xfer->rx.len + sizeof(u32);
+ /* ... and status if needed */
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ msg_size += sizeof(u32);
+
+ if (msg_size > *msg_len)
+ return -ENOSPC;
+
+ m = msg;
+ *m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ *++m = cpu_to_le32(xfer->hdr.status);
+
+ memcpy(++m, xfer->rx.buf, xfer->rx.len);
+
+ *msg_len = msg_size;
+
+ return 0;
+}
+
+/**
+ * scmi_raw_message_report - Helper to report back valid responses/notifications
+ * to raw message requests.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @xfer: The xfer containing the message to be reported
+ * @idx: The index of the queue.
+ * @chan_id: The channel ID to use.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the regular RX
+ * path to save and enqueue the response/notification payload carried by this
+ * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
+ *
+ * This way the caller can free the related xfer immediately afterwards and the
+ * user can read back the raw message payload at its own pace (if ever) without
+ * holding an xfer for too long.
+ */
+void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id)
+{
+ int ret;
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+ struct device *dev;
+ struct scmi_raw_queue *q;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
+ return;
+
+ dev = raw->handle->dev;
+ q = scmi_raw_queue_select(raw, idx,
+ SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+
+ /*
+ * Grab the msg_q_lock upfront to avoid a possible race between
+ * realizing the free list was empty and effectively picking the next
+ * buffer to use from the oldest one enqueued and still unread on this
+ * msg_q.
+ *
+ * Note that these locks are taken together nowhere else, so there is
+ * no risk of deadlocks due to inversion.
+ */
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_get(q);
+ if (!rb) {
+ /*
+ * Immediate and delayed replies to previously injected Raw
+ * commands MUST be read back from userspace to free the buffers:
+ * if this is not happening something is seriously broken and
+ * must be fixed at the application level: complain loudly.
+ */
+ if (idx == SCMI_RAW_REPLY_QUEUE) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ dev_warn(dev,
+ "RAW[%d] - Buffers exhausted. Dropping report.\n",
+ idx);
+ return;
+ }
+
+ /*
+ * Notifications and errors queues are instead handled in a
+ * circular manner: unread old buffers are just overwritten by
+ * newer ones.
+ *
+ * The main reason for this is that notifications originated by Raw
+ * requests cannot be distinguished from normal ones, so the Raw buffer
+ * queues risk being flooded and depleted by notifications if Raw mode
+ * is mistakenly left enabled or is running in coexistence mode.
+ */
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ if (WARN_ON(!rb)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ return;
+ }
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ dev_warn_once(dev,
+ "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
+ idx);
+ }
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
+ if (ret) {
+ dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
+ scmi_raw_buffer_put(q, rb);
+ return;
+ }
+
+ scmi_raw_buffer_enqueue(q, rb);
+}
+
+static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, u32 msg_hdr)
+{
+ /* Unpack received HDR as it is */
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+
+ memset(xfer->rx.buf, 0x00, xfer->rx.len);
+
+ raw->desc->ops->fetch_response(cinfo, xfer);
+}
+
+/**
+ * scmi_raw_error_report - Helper to report back timed-out or generally
+ * unexpected replies.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @cinfo: A reference to the channel to use to retrieve the broken xfer
+ * @msg_hdr: The SCMI message header of the message to fetch and report
+ * @priv: Any private data related to the xfer.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the RX path in
+ * case of errors to save and enqueue the bad message payload carried by the
+ * message that has just been received.
+ *
+ * Note that we have to manually fetch any available payload into a temporary
+ * xfer to be able to save and enqueue the message, since the regular RX error
+ * path that called this will not have fetched the message payload, having
+ * classified it as an error.
+ */
+void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv)
+{
+ struct scmi_xfer xfer;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ xfer.rx.len = raw->desc->max_msg_size;
+ xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
+ if (!xfer.rx.buf) {
+ dev_info(raw->handle->dev,
+ "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
+ msg_hdr);
+ return;
+ }
+
+ /* Any transport-provided priv must be passed back down to transport */
+ if (priv)
+ /* Ensure priv is visible */
+ smp_store_mb(xfer.priv, priv);
+
+ scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
+ scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);
+
+ kfree(xfer.rx.buf);
+}
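
The debugfs files created above are the whole userspace-facing API of Raw mode. A minimal userspace sketch of a round trip, assuming the common SCMI debugfs root lands at /sys/kernel/debug/scmi/0, that one write() injects one whole message (a little-endian 32-bit SCMI header optionally followed by a payload), and that a read() returns what scmi_xfer_raw_collect() assembled, i.e. header, status word, then payload:

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <unistd.h>

int scmi_raw_roundtrip(void)
{
	/* Path and header value are assumptions for illustration */
	int fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
	uint32_t hdr = 0x4000;	/* protocol 0x10 (BASE), msg 0x0, type command */
	uint8_t reply[128];
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return -1;

	if (write(fd, &hdr, sizeof(hdr)) < 0)	/* one write, one message */
		goto out;
	if (poll(&pfd, 1, 1000) > 0)		/* wait for the reply queue */
		read(fd, reply, sizeof(reply));	/* hdr + status + payload */
out:
	close(fd);
	return 0;
}

The 0x4000 header follows the field layout used by pack_scmi_header(): message ID in bits [7:0], type in [9:8], protocol ID in [17:10]; the sequence token is managed by the core.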
diff --git a/drivers/firmware/arm_scmi/raw_mode.h b/drivers/firmware/arm_scmi/raw_mode.h
new file mode 100644
index 000000000000..8af756a83fd1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * Raw mode support header.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_RAW_MODE_H
+#define _SCMI_RAW_MODE_H
+
+#include "common.h"
+
+enum {
+ SCMI_RAW_REPLY_QUEUE,
+ SCMI_RAW_NOTIF_QUEUE,
+ SCMI_RAW_ERRS_QUEUE,
+ SCMI_RAW_MAX_QUEUE
+};
+
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg);
+void scmi_raw_mode_cleanup(void *raw);
+
+void scmi_raw_message_report(void *raw, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id);
+void scmi_raw_error_report(void *raw, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv);
+
+#endif /* _SCMI_RAW_MODE_H */
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 1dfe534b8518..87b4f4d35f06 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
+ size_t len = ioread32(&shmem->length);
+
xfer->hdr.status = ioread32(shmem->msg_payload);
/* Skip the length of header and status in shmem area i.e 8 bytes */
- xfer->rx.len = min_t(size_t, xfer->rx.len,
- ioread32(&shmem->length) - 8);
+ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
size_t max_len, struct scmi_xfer *xfer)
{
+ size_t len = ioread32(&shmem->length);
+
/* Skip only the length of header in shmem area i.e 4 bytes */
- xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
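
Both hunks above guard the same unsigned underflow: shmem->length is firmware-controlled, and since len is a size_t, a bogus length below the header size would wrap around before min_t() could clamp it. A standalone sketch of the failure mode, assuming a 64-bit size_t:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	size_t rx_len = 64;
	size_t len = 4;	/* bogus firmware-reported shmem length */

	/* Old code: len - 8 wraps to SIZE_MAX - 3, so min() returns 64 */
	size_t old_len = MIN(rx_len, len - 8);
	/* New code: clamp to 0 before taking the minimum */
	size_t new_len = MIN(rx_len, len > 8 ? len - 8 : 0);

	printf("old=%zu new=%zu\n", old_len, new_len);
	return 0;
}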
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 87a7b13cf868..93272e4bbd12 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -52,9 +52,9 @@ static irqreturn_t smc_msg_done_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static bool smc_chan_available(struct device *dev, int idx)
+static bool smc_chan_available(struct device_node *of_node, int idx)
{
- struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+ struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
if (!np)
return false;
@@ -171,8 +171,6 @@ static int smc_chan_free(int id, void *p, void *data)
cinfo->transport_info = NULL;
scmi_info->cinfo = NULL;
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 33c9b81a55cd..d68c01cb7aa0 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
}
vioch->shutdown_done = &vioch_shutdown_done;
- virtio_break_device(vioch->vqueue->vdev);
if (!vioch->is_rx && vioch->deferred_tx_wq)
/* Cannot be kicked anymore after this...*/
vioch->deferred_tx_wq = NULL;
@@ -386,7 +385,7 @@ static int virtio_link_supplier(struct device *dev)
return 0;
}
-static bool virtio_chan_available(struct device *dev, int idx)
+static bool virtio_chan_available(struct device_node *of_node, int idx)
{
struct scmi_vio_channel *channels, *vioch = NULL;
@@ -482,10 +481,14 @@ static int virtio_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_vio_channel *vioch = cinfo->transport_info;
+ /*
+ * Break device to inhibit further traffic flowing while shutting down
+ * the channels: doing it later holding vioch->lock creates unsafe
+ * locking dependency chains as reported by LOCKDEP.
+ */
+ virtio_break_device(vioch->vqueue->vdev);
scmi_vio_channel_cleanup_sync(vioch);
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 66727ad3361b..ed5aff0a4204 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -418,10 +418,10 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
state->pos, state->count);
case DMI_SEL_ACCESS_METHOD_GPNV:
- pr_info("dmi-sysfs: GPNV support missing.\n");
+ pr_info_ratelimited("dmi-sysfs: GPNV support missing.\n");
return -EIO;
default:
- pr_info("dmi-sysfs: Unknown access method %02x\n",
+ pr_info_ratelimited("dmi-sysfs: Unknown access method %02x\n",
sel.access_method);
return -EIO;
}
@@ -603,16 +603,16 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
"%d-%d", dh->type, entry->instance);
- if (*ret) {
- kobject_put(&entry->kobj);
- return;
- }
-
/* Thread on the global list for cleanup */
spin_lock(&entry_list_lock);
list_add_tail(&entry->list, &entry_list);
spin_unlock(&entry_list_lock);
+ if (*ret) {
+ kobject_put(&entry->kobj);
+ return;
+ }
+
/* Handle specializations by type */
switch (dh->type) {
case DMI_ENTRY_SYSTEM_EVENT_LOG:
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
index 53e435c4f310..a55771b99a97 100644
--- a/drivers/firmware/efi/cper_cxl.c
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -9,7 +9,6 @@
#include <linux/cper.h>
#include "cper_cxl.h"
-#include <linux/cxl_err.h>
#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
@@ -19,6 +18,17 @@
#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
static const char * const prot_err_agent_type_strs[] = {
"Restricted CXL Device",
"Restricted CXL Host Downstream Port",
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index 4d6c5327471a..f54e6fdf08e2 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -10,11 +10,14 @@
#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/screen_info.h>
+#include <linux/string.h>
#include <asm/early_ioremap.h>
static const struct console *earlycon_console __initdata;
static const struct font_desc *font;
+static u16 cur_line_y, max_line_y;
+static u32 efi_x_array[1024];
static u32 efi_x, efi_y;
static u64 fb_base;
static bool fb_wb;
@@ -85,9 +88,17 @@ static void efi_earlycon_clear_scanline(unsigned int y)
static void efi_earlycon_scroll_up(void)
{
unsigned long *dst, *src;
+ u16 maxlen = 0;
u16 len;
u32 i, height;
+ /* Find the cached maximum x coordinate */
+ for (i = 0; i < max_line_y; i++) {
+ if (efi_x_array[i] > maxlen)
+ maxlen = efi_x_array[i];
+ }
+ maxlen *= 4;
+
len = screen_info.lfb_linelength;
height = screen_info.lfb_height;
@@ -102,7 +113,7 @@ static void efi_earlycon_scroll_up(void)
return;
}
- memmove(dst, src, len);
+ memmove(dst, src, maxlen);
efi_earlycon_unmap(src, len);
efi_earlycon_unmap(dst, len);
@@ -135,6 +146,7 @@ static void
efi_earlycon_write(struct console *con, const char *str, unsigned int num)
{
struct screen_info *si;
+ u32 cur_efi_x = efi_x;
unsigned int len;
const char *s;
void *dst;
@@ -143,16 +155,10 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
len = si->lfb_linelength;
while (num) {
- unsigned int linemax;
- unsigned int h, count = 0;
-
- for (s = str; *s && *s != '\n'; s++) {
- if (count == num)
- break;
- count++;
- }
+ unsigned int linemax = (si->lfb_width - efi_x) / font->width;
+ unsigned int h, count;
- linemax = (si->lfb_width - efi_x) / font->width;
+ count = strnchrnul(str, num, '\n') - str;
if (count > linemax)
count = linemax;
@@ -181,6 +187,7 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
str += count;
if (num > 0 && *s == '\n') {
+ cur_efi_x = efi_x;
efi_x = 0;
efi_y += font->height;
str++;
@@ -188,6 +195,7 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
}
if (efi_x + font->width > si->lfb_width) {
+ cur_efi_x = efi_x;
efi_x = 0;
efi_y += font->height;
}
@@ -195,6 +203,9 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
if (efi_y + font->height > si->lfb_height) {
u32 i;
+ efi_x_array[cur_line_y] = cur_efi_x;
+ cur_line_y = (cur_line_y + 1) % max_line_y;
+
efi_y -= font->height;
efi_earlycon_scroll_up();
@@ -235,7 +246,15 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (!font)
return -ENODEV;
- efi_y = rounddown(yres, font->height) - font->height;
+ /* Fill the cache with maximum possible value of x coordinate */
+ memset32(efi_x_array, rounddown(xres, font->width), ARRAY_SIZE(efi_x_array));
+ efi_y = rounddown(yres, font->height);
+
+ /* Make sure we have cache for the x coordinate for the full screen */
+ max_line_y = efi_y / font->height + 1;
+ cur_line_y = 0;
+
+ efi_y -= font->height;
for (i = 0; i < (yres - efi_y) / font->height; i++)
efi_earlycon_scroll_up();
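
The scroll path above now moves only the populated part of each scanline: efi_x_array caches, per text row, the rightmost x coordinate written, and the scroll copies max(x) * 4 bytes per line instead of the full pitch. A toy model of the bookkeeping, assuming 25 rows and the 32bpp framebuffer that the '* 4' in the patch implies:

#define ROWS	25	/* assumed row count; the patch sizes this at runtime */

static unsigned int x_cache[ROWS];	/* rightmost pixel written per row */
static unsigned int cur_row;

static void row_done(unsigned int last_x)
{
	x_cache[cur_row] = last_x;
	cur_row = (cur_row + 1) % ROWS;
}

static unsigned int scroll_bytes(void)
{
	unsigned int i, max = 0;

	for (i = 0; i < ROWS; i++)
		if (x_cache[i] > max)
			max = x_cache[i];
	return max * 4;	/* 4 bytes per pixel, as the '* 4' in the patch */
}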
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 1639159493e3..2c16080e1f71 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -92,7 +92,7 @@ static int __init uefi_init(u64 efi_system_table)
if (IS_ENABLED(CONFIG_64BIT))
set_bit(EFI_64BIT, &efi.flags);
- retval = efi_systab_check_header(&systab->hdr, 2);
+ retval = efi_systab_check_header(&systab->hdr);
if (retval)
goto out;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 09716eebe8ac..abeff7dc0b58 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -187,8 +187,27 @@ static const struct attribute_group efi_subsys_attr_group = {
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;
+static bool generic_ops_supported(void)
+{
+ unsigned long name_size;
+ efi_status_t status;
+ efi_char16_t name;
+ efi_guid_t guid;
+
+ name_size = sizeof(name);
+
+ status = efi.get_next_variable(&name_size, &name, &guid);
+ if (status == EFI_UNSUPPORTED)
+ return false;
+
+ return true;
+}
+
static int generic_ops_register(void)
{
+ if (!generic_ops_supported())
+ return 0;
+
generic_ops.get_variable = efi.get_variable;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
@@ -197,11 +216,14 @@ static int generic_ops_register(void)
generic_ops.set_variable = efi.set_variable;
generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
}
- return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+ return efivars_register(&generic_efivars, &generic_ops);
}
static void generic_ops_unregister(void)
{
+ if (!generic_ops.get_variable)
+ return;
+
efivars_unregister(&generic_efivars);
}
@@ -394,8 +416,8 @@ static int __init efisubsys_init(void)
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
- destroy_workqueue(efi_rts_wq);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_destroy_wq;
}
if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
@@ -443,7 +465,10 @@ err_unregister:
err_put:
kobject_put(efi_kobj);
efi_kobj = NULL;
- destroy_workqueue(efi_rts_wq);
+err_destroy_wq:
+ if (efi_rts_wq)
+ destroy_workqueue(efi_rts_wq);
+
return error;
}
@@ -478,7 +503,7 @@ void __init efi_find_mirror(void)
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
-int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
efi_memory_desc_t *md;
@@ -496,6 +521,12 @@ int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
u64 size;
u64 end;
+ /* skip bogus entries (including empty ones) */
+ if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
+ (md->num_pages <= 0) ||
+ (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
+ continue;
+
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
@@ -506,6 +537,9 @@ int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
return -ENOENT;
}
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+ __weak __alias(__efi_mem_desc_lookup);
+
/*
* Calculate the highest address of an efi memory descriptor.
*/
@@ -532,6 +566,10 @@ void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
*/
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
+ /* efi_mem_reserve() does not work under Xen */
+ if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
+ return;
+
if (!memblock_is_region_reserved(addr, size))
memblock_reserve(addr, size);
@@ -580,13 +618,20 @@ static __init int match_config_table(const efi_guid_t *guid,
int i;
for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
- if (!efi_guidcmp(*guid, table_types[i].guid)) {
- *(table_types[i].ptr) = table;
+ if (efi_guidcmp(*guid, table_types[i].guid))
+ continue;
+
+ if (!efi_config_table_is_usable(guid, table)) {
if (table_types[i].name[0])
- pr_cont("%s=0x%lx ",
+ pr_cont("(%s=0x%lx unusable) ",
table_types[i].name, table);
return 1;
}
+
+ *(table_types[i].ptr) = table;
+ if (table_types[i].name[0])
+ pr_cont("%s=0x%lx ", table_types[i].name, table);
+ return 1;
}
return 0;
@@ -717,20 +762,13 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
return 0;
}
-int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
- int min_major_version)
+int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
pr_err("System table signature incorrect!\n");
return -EINVAL;
}
- if ((systab_hdr->revision >> 16) < min_major_version)
- pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
- systab_hdr->revision >> 16,
- systab_hdr->revision & 0xffff,
- min_major_version);
-
return 0;
}
@@ -761,6 +799,7 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
char vendor[100] = "unknown";
const efi_char16_t *c16;
size_t i;
+ u16 rev;
c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
@@ -771,10 +810,14 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
}
- pr_info("EFI v%u.%.02u by %s\n",
- systab_hdr->revision >> 16,
- systab_hdr->revision & 0xffff,
- vendor);
+ rev = (u16)systab_hdr->revision;
+ pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
+
+ rev %= 10;
+ if (rev)
+ pr_cont(".%u", rev);
+
+ pr_cont(" by %s\n", vendor);
if (IS_ENABLED(CONFIG_X86_64) &&
systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
@@ -1004,6 +1047,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; ) {
rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
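
The revision-reporting hunk above relies on the UEFI encoding of the low 16 bits of the system table revision as minor * 10 + patch, so e.g. 2.7 now prints as "2.7" rather than "2.70", and the patch digit only appears when non-zero. A standalone sketch of the decoding:

#include <stdint.h>
#include <stdio.h>

static void print_efi_rev(uint32_t revision)
{
	uint16_t rev = (uint16_t)revision;

	printf("EFI v%u.%u", revision >> 16, rev / 10);
	rev %= 10;
	if (rev)
		printf(".%u", rev);
	printf("\n");
}

int main(void)
{
	print_efi_rev((2 << 16) | 70);	/* prints "EFI v2.7"   */
	print_efi_rev((2 << 16) | 31);	/* prints "EFI v2.3.1" */
	return 0;
}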
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 2a2f52b017e7..87729c365be1 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -247,7 +247,7 @@ void __init efi_esrt_init(void)
int rc;
phys_addr_t end;
- if (!efi_enabled(EFI_MEMMAP))
+ if (!efi_enabled(EFI_MEMMAP) && !efi_enabled(EFI_PARAVIRT))
return;
pr_debug("esrt-init: loading.\n");
@@ -258,20 +258,15 @@ void __init efi_esrt_init(void)
if (rc < 0 ||
(!(md.attribute & EFI_MEMORY_RUNTIME) &&
md.type != EFI_BOOT_SERVICES_DATA &&
- md.type != EFI_RUNTIME_SERVICES_DATA)) {
+ md.type != EFI_RUNTIME_SERVICES_DATA &&
+ md.type != EFI_ACPI_RECLAIM_MEMORY &&
+ md.type != EFI_ACPI_MEMORY_NVS)) {
pr_warn("ESRT header is not in the memory map.\n");
return;
}
- max = efi_mem_desc_end(&md);
- if (max < efi.esrt) {
- pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n",
- (void *)efi.esrt, (void *)max);
- return;
- }
-
+ max = efi_mem_desc_end(&md) - efi.esrt;
size = sizeof(*esrt);
- max -= efi.esrt;
if (max < size) {
pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index be8b8c6e8b40..80d85a5169fb 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -87,7 +87,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
screen_info.o efi-stub-entry.o
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o arm64-entry.o smbios.o
+lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
@@ -141,7 +141,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
#
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS64
+STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# For RISC-V, we don't need anything special other than arm64. Keep all the
# symbols in .init section and make sure that no absolute symbols references
diff --git a/drivers/firmware/efi/libstub/arm64-entry.S b/drivers/firmware/efi/libstub/arm64-entry.S
deleted file mode 100644
index b5c17e89a4fc..000000000000
--- a/drivers/firmware/efi/libstub/arm64-entry.S
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * EFI entry point.
- *
- * Copyright (C) 2013, 2014 Red Hat, Inc.
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
- /*
- * The entrypoint of a arm64 bare metal image is at offset #0 of the
- * image, so this is a reasonable default for primary_entry_offset.
- * Only when the EFI stub is integrated into the core kernel, it is not
- * guaranteed that the PE/COFF header has been copied to memory too, so
- * in this case, primary_entry_offset should be overridden by the
- * linker and point to primary_entry() directly.
- */
- .weak primary_entry_offset
-
-SYM_CODE_START(efi_enter_kernel)
- /*
- * efi_pe_entry() will have copied the kernel image if necessary and we
- * end up here with device tree address in x1 and the kernel entry
- * point stored in x0. Save those values in registers which are
- * callee preserved.
- */
- ldr w2, =primary_entry_offset
- add x19, x0, x2 // relocated Image entrypoint
-
- mov x0, x1 // DTB address
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
-
- /*
- * Clean the remainder of this routine to the PoC
- * so that we can safely disable the MMU and caches.
- */
- adr x4, 1f
- dc civac, x4
- dsb sy
-
- /* Turn off Dcache and MMU */
- mrs x4, CurrentEL
- cmp x4, #CurrentEL_EL2
- mrs x4, sctlr_el1
- b.ne 0f
- mrs x4, sctlr_el2
-0: bic x4, x4, #SCTLR_ELx_M
- bic x4, x4, #SCTLR_ELx_C
- b.eq 1f
- b 2f
-
- .balign 32
-1: pre_disable_mmu_workaround
- msr sctlr_el2, x4
- isb
- br x19 // jump to kernel entrypoint
-
-2: pre_disable_mmu_workaround
- msr sctlr_el1, x4
- isb
- br x19 // jump to kernel entrypoint
-
- .org 1b + 32
-SYM_CODE_END(efi_enter_kernel)
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 7327b98d8e3f..d4a6b12a8741 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -58,7 +58,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_handle_t image_handle)
{
efi_status_t status;
- unsigned long kernel_size, kernel_memsize = 0;
+ unsigned long kernel_size, kernel_codesize, kernel_memsize;
u32 phys_seed = 0;
u64 min_kimg_align = efi_get_kimg_min_align();
@@ -93,6 +93,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
SEGMENT_ALIGN >> 10);
kernel_size = _edata - _text;
+ kernel_codesize = __inittext_end - _text;
kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize;
@@ -121,7 +122,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*/
*image_addr = (u64)_text;
*reserve_size = 0;
- goto clean_image_to_poc;
+ return EFI_SUCCESS;
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
@@ -137,14 +138,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*image_addr = *reserve_addr;
memcpy((void *)*image_addr, _text, kernel_size);
+ caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
-clean_image_to_poc:
+ return EFI_SUCCESS;
+}
+
+asmlinkage void primary_entry(void);
+
+unsigned long primary_entry_offset(void)
+{
/*
- * Clean the copied Image to the PoC, and ensure it is not shadowed by
- * stale icache entries from before relocation.
+ * When built as part of the kernel, the EFI stub cannot branch to the
+ * kernel proper via the image header, as the PE/COFF header is
+ * strictly not part of the in-memory presentation of the image, only
+ * of the file representation. So instead, we need to jump to the
+ * actual entrypoint in the .text region of the image.
*/
- dcache_clean_poc(*image_addr, *image_addr + kernel_size);
- asm("ic ialluis");
-
- return EFI_SUCCESS;
+ return (char *)primary_entry - _text;
}
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index ff2d18c42ee7..399770266372 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
const u8 *type1_family = efi_get_smbios_string(1, family);
/*
- * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
- * has not been called prior.
+ * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+ * SetVirtualAddressMap() has not been called prior.
*/
- if (!type1_family || strcmp(type1_family, "Altra"))
+ if (!type1_family || (
+ strcmp(type1_family, "eMAG") &&
+ strcmp(type1_family, "Altra") &&
+ strcmp(type1_family, "Altra Max")))
return false;
efi_warn("Working around broken SetVirtualAddressMap()\n");
@@ -56,6 +59,12 @@ efi_status_t check_platform_features(void)
return EFI_SUCCESS;
}
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+#define DCTYPE "civac"
+#else
+#define DCTYPE "cvau"
+#endif
+
void efi_cache_sync_image(unsigned long image_base,
unsigned long alloc_size,
unsigned long code_size)
@@ -64,13 +73,38 @@ void efi_cache_sync_image(unsigned long image_base,
u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr,
CTR_EL0_DminLine_SHIFT);
- do {
- asm("dc civac, %0" :: "r"(image_base));
- image_base += lsize;
- alloc_size -= lsize;
- } while (alloc_size >= lsize);
+ /* only perform the cache maintenance if needed for I/D coherency */
+ if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
+ do {
+ asm("dc " DCTYPE ", %0" :: "r"(image_base));
+ image_base += lsize;
+ code_size -= lsize;
+ } while (code_size >= lsize);
+ }
asm("ic ialluis");
dsb(ish);
isb();
}
+
+unsigned long __weak primary_entry_offset(void)
+{
+ /*
+ * By default, we can invoke the kernel via the branch instruction in
+ * the image header, so offset #0. This will be overridden by the EFI
+ * stub build that is linked into the core kernel, as in that case, the
+ * image header may not have been loaded into memory, or may be mapped
+ * with non-executable permissions.
+ */
+ return 0;
+}
+
+void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size)
+{
+ void (* __noreturn enter_kernel)(u64, u64, u64, u64);
+
+ enter_kernel = (void *)entrypoint + primary_entry_offset();
+ enter_kernel(fdt_addr, 0, 0, 0);
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index f5a4bdacac64..1e0203d74691 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -651,3 +651,70 @@ efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
return status;
}
+
+/**
+ * efi_remap_image - Remap a loaded image with the appropriate permissions
+ * for code and data
+ *
+ * @image_base: the base of the image in memory
+ * @alloc_size: the size of the area in memory occupied by the image
+ * @code_size: the size of the leading part of the image containing code
+ * and read-only data
+ *
+ * efi_remap_image() uses the EFI memory attribute protocol to remap the code
+ * region of the loaded image read-only/executable, and the remainder
+ * read-write/non-executable. The code region is assumed to start at the base
+ * of the image, and will therefore cover the PE/COFF header as well.
+ */
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size)
+{
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+ efi_memory_attribute_protocol_t *memattr;
+ efi_status_t status;
+ u64 attr;
+
+ /*
+ * If the firmware implements the EFI_MEMORY_ATTRIBUTE_PROTOCOL, let's
+ * invoke it to remap the text/rodata region of the decompressed image
+ * as read-only and the data/bss region as non-executable.
+ */
+ status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
+ if (status != EFI_SUCCESS)
+ return;
+
+ // Get the current attributes for the entire region
+ status = memattr->get_memory_attributes(memattr, image_base,
+ alloc_size, &attr);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to retrieve memory attributes for image region: 0x%lx\n",
+ status);
+ return;
+ }
+
+ // Mark the code region as read-only
+ status = memattr->set_memory_attributes(memattr, image_base, code_size,
+ EFI_MEMORY_RO);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to remap code region read-only\n");
+ return;
+ }
+
+ // If the entire region was already mapped as non-exec, clear the
+ // attribute from the code region. Otherwise, set it on the data
+ // region.
+ if (attr & EFI_MEMORY_XP) {
+ status = memattr->clear_memory_attributes(memattr, image_base,
+ code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap code region executable\n");
+ } else {
+ status = memattr->set_memory_attributes(memattr,
+ image_base + code_size,
+ alloc_size - code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap data region non-executable\n");
+ }
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 5b8f2c411ed8..6bd3bb86d967 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -442,6 +442,26 @@ union efi_dxe_services_table {
} mixed_mode;
};
+typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t;
+
+union efi_memory_attribute_protocol {
+ struct {
+ efi_status_t (__efiapi *get_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *);
+
+ efi_status_t (__efiapi *set_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+
+ efi_status_t (__efiapi *clear_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+ };
+ struct {
+ u32 get_memory_attributes;
+ u32 set_memory_attributes;
+ u32 clear_memory_attributes;
+ } mixed_mode;
+};
+
typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
union efi_uga_draw_protocol {
@@ -1076,4 +1096,7 @@ struct efi_smbios_type1_record {
const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size);
+
#endif
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index 66be5fdc6b58..ba234e062a1a 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -137,6 +137,8 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
efi_cache_sync_image(image_base, alloc_size, code_size);
+ efi_remap_image(image_base, alloc_size, code_size);
+
status = efi_stub_common(handle, image, image_base, cmdline_ptr);
free_image:
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 0a9aba5f9cef..ab85bf8e165a 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -33,7 +33,7 @@ int __init efi_memattr_init(void)
return -ENOMEM;
}
- if (tbl->version > 1) {
+ if (tbl->version > 2) {
pr_warn("Unexpected EFI Memory Attributes table version %d\n",
tbl->version);
goto unmap;
@@ -129,6 +129,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn)
{
efi_memory_attributes_table_t *tbl;
+ bool has_bti = false;
int i, ret;
if (tbl_size <= sizeof(*tbl))
@@ -150,6 +151,10 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
return -ENOMEM;
}
+ if (tbl->version > 1 &&
+ (tbl->flags & EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD))
+ has_bti = true;
+
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI Memory Attributes table:\n");
@@ -169,7 +174,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
efi_md_typeattr_format(buf, sizeof(buf), &md));
if (valid) {
- ret = fn(mm, &md);
+ ret = fn(mm, &md, has_bti);
if (ret)
pr_err("Error updating mappings, skipping subsequent md's\n");
}
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 7feee3d9c2bf..1fba4e09cdcf 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work;
\
if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \
pr_warn_once("EFI Runtime Services are disabled!\n"); \
+ efi_rts_work.status = EFI_DEVICE_ERROR; \
goto exit; \
} \
\
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index 7882d4b3f2be..f06fdacc9bc8 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"Lenovo ideapad D330-10IGM"),
},
},
+ {
+ /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "IdeaPad Duet 3 10IGL5"),
+ },
+ },
{},
};
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ba9f18312f5..bd75b87f5fc1 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -6,6 +6,8 @@
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
*/
+#define pr_fmt(fmt) "efivars: " fmt
+
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/errno.h>
@@ -40,45 +42,47 @@ static efi_status_t check_var_size(bool nonblocking, u32 attributes,
}
/**
- * efivars_kobject - get the kobject for the registered efivars
+ * efivar_is_available - check if efivars is available
*
- * If efivars_register() has not been called we return NULL,
- * otherwise return the kobject used at registration time.
+ * @return true iff efivars is currently registered
*/
-struct kobject *efivars_kobject(void)
+bool efivar_is_available(void)
{
- if (!__efivars)
- return NULL;
-
- return __efivars->kobject;
+ return __efivars != NULL;
}
-EXPORT_SYMBOL_GPL(efivars_kobject);
+EXPORT_SYMBOL_GPL(efivar_is_available);
/**
* efivars_register - register an efivars
* @efivars: efivars to register
* @ops: efivars operations
- * @kobject: @efivars-specific kobject
*
* Only a single efivars can be registered at any time.
*/
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject)
+ const struct efivar_operations *ops)
{
+ int rv;
+
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (__efivars) {
+ pr_warn("efivars already registered\n");
+ rv = -EBUSY;
+ goto out;
+ }
+
efivars->ops = ops;
- efivars->kobject = kobject;
__efivars = efivars;
pr_info("Registered efivars operations\n");
-
+ rv = 0;
+out:
up(&efivars_lock);
- return 0;
+ return rv;
}
EXPORT_SYMBOL_GPL(efivars_register);
@@ -97,7 +101,7 @@ int efivars_unregister(struct efivars *efivars)
return -EINTR;
if (!__efivars) {
- printk(KERN_ERR "efivars not registered\n");
+ pr_err("efivars not registered\n");
rv = -EINVAL;
goto out;
}
@@ -117,7 +121,7 @@ out:
}
EXPORT_SYMBOL_GPL(efivars_unregister);
-int efivar_supports_writes(void)
+bool efivar_supports_writes(void)
{
return __efivars && __efivars->ops->set_variable;
}
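
With the kobject argument gone from efivars_register(), consumers probe the backend through the new predicates instead of efivars_kobject(). A minimal sketch of the calling pattern; the function name is illustrative:

/* Hypothetical consumer, e.g. a filesystem or platform driver init path */
static int example_efivars_user_init(void)
{
	if (!efivar_is_available())
		return -ENODEV;	/* no efivars backend registered */

	if (!efivar_supports_writes())
		pr_info("backend is read-only\n");

	return 0;
}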
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 9f190eab43ed..1bc7cbf2f65d 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -44,14 +44,6 @@ config GOOGLE_COREBOOT_TABLE
device tree node /firmware/coreboot.
If unsure say N.
-config GOOGLE_COREBOOT_TABLE_ACPI
- tristate
- select GOOGLE_COREBOOT_TABLE
-
-config GOOGLE_COREBOOT_TABLE_OF
- tristate
- select GOOGLE_COREBOOT_TABLE
-
config GOOGLE_MEMCONSOLE
tristate
depends on GOOGLE_MEMCONSOLE_X86_LEGACY || GOOGLE_MEMCONSOLE_COREBOOT
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 2652c396c423..33ae94745aef 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -93,14 +93,19 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
for (i = 0; i < header->table_entries; i++) {
entry = ptr_entry;
- device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+ if (entry->size < sizeof(*entry)) {
+ dev_warn(dev, "coreboot table entry too small!\n");
+ return -EINVAL;
+ }
+
+ device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
if (!device)
return -ENOMEM;
device->dev.parent = dev;
device->dev.bus = &coreboot_bus_type;
device->dev.release = coreboot_device_release;
- memcpy(&device->entry, ptr_entry, entry->size);
+ memcpy(device->raw, ptr_entry, entry->size);
switch (device->entry.tag) {
case LB_TAG_CBMEM_ENTRY:
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 37f4d335a606..d814dca33a08 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -79,6 +79,7 @@ struct coreboot_device {
struct lb_cbmem_ref cbmem_ref;
struct lb_cbmem_entry cbmem_entry;
struct lb_framebuffer framebuffer;
+ DECLARE_FLEX_ARRAY(u8, raw);
};
};
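
DECLARE_FLEX_ARRAY() gives the union a byte-addressable, variable-size view, so coreboot_table_populate() can memcpy() the full entry->size bytes without overflowing any fixed-size union member (which FORTIFY_SOURCE would flag). A minimal sketch of the pattern, with hypothetical names:

#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY() */
#include <linux/string.h>
#include <linux/types.h>

struct blob_device {
	int id;
	union {
		u32 tag;			/* typed view of the payload */
		DECLARE_FLEX_ARRAY(u8, raw);	/* byte view, variable size  */
	};
};

/* Hypothetical helper: dev must be allocated with sizeof(*dev) + size */
static void blob_fill(struct blob_device *dev, const void *entry, size_t size)
{
	memcpy(dev->raw, entry, size);
}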
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index c6dcc1ef93ac..c323a818805c 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
fb->green_mask_pos == formats[i].green.offset &&
fb->green_mask_size == formats[i].green.length &&
fb->blue_mask_pos == formats[i].blue.offset &&
- fb->blue_mask_size == formats[i].blue.length &&
- fb->reserved_mask_pos == formats[i].transp.offset &&
- fb->reserved_mask_size == formats[i].transp.length)
+ fb->blue_mask_size == formats[i].blue.length)
pdata.format = formats[i].name;
}
if (!pdata.format)
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 4e2575dfeb90..96ea1fa76d35 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -361,9 +361,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
memcpy(data, gsmi_dev.data_buf->start, *data_size);
/* All variables have the following attributes */
- *attr = EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS;
+ if (attr)
+ *attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
@@ -1029,7 +1030,7 @@ static __init int gsmi_init(void)
}
#ifdef CONFIG_EFI
- ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
+ ret = efivars_register(&efivars, &efivar_ops);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 77aa5c6398aa..3f5ff9ed668e 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -82,7 +82,7 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
if (!sm_phy_base)
- return 0;
+ return NULL;
return ioremap_cache(sm_phy_base, size);
}
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index e7bcfca4159f..29619f49873a 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -108,9 +108,10 @@ bool psci_power_state_is_valid(u32 state)
return !(state & ~valid_mask);
}
-static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -118,9 +119,10 @@ static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
return res.a0;
}
-static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -128,7 +130,7 @@ static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
return res.a0;
}
-static int psci_to_linux_errno(int errno)
+static __always_inline int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
@@ -169,7 +171,8 @@ int psci_set_osi_mode(bool enable)
return psci_to_linux_errno(err);
}
-static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+static __always_inline int
+__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
@@ -177,13 +180,15 @@ static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
return psci_to_linux_errno(err);
}
-static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
state, entry_point);
}
-static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
state, entry_point);
@@ -440,6 +445,9 @@ static const struct file_operations psci_debugfs_ops = {
static int __init psci_debugfs_init(void)
{
+ if (!invoke_psci_fn || !psci_ops.get_version)
+ return 0;
+
return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
&psci_debugfs_ops));
}
@@ -447,10 +455,12 @@ late_initcall(psci_debugfs_init)
#endif
#ifdef CONFIG_CPU_IDLE
-static int psci_suspend_finisher(unsigned long state)
+static noinstr int psci_suspend_finisher(unsigned long state)
{
u32 power_state = state;
- phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+ phys_addr_t pa_cpu_resume;
+
+ pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
}
@@ -462,11 +472,22 @@ int psci_cpu_suspend_enter(u32 state)
if (!psci_power_state_loses_context(state)) {
struct arm_cpuidle_irq_context context;
+ ct_cpuidle_enter();
arm_cpuidle_save_irq_context(&context);
ret = psci_ops.cpu_suspend(state, 0);
arm_cpuidle_restore_irq_context(&context);
+ ct_cpuidle_exit();
} else {
+ /*
+ * ARM64 cpu_suspend() wants to do ct_cpuidle_*() itself.
+ */
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_enter();
+
ret = cpu_suspend(state, psci_suspend_finisher);
+
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_exit();
}
return ret;
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 9f918b9e6f8f..029e6d117cb8 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -9,7 +9,7 @@
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
index d111833364ba..16cf88acfa8e 100644
--- a/drivers/firmware/qcom_scm-smc.c
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -8,7 +8,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
@@ -52,29 +52,97 @@ static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
-static void __scm_smc_do(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res, bool atomic)
+static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
{
- int retry_count = 0;
+ memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
+
+ resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
+
+ resume->args[1] = QCOM_SCM_ARGS(1);
+
+ resume->args[2] = smc_call_ctx;
+}
+
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+{
+ int ret;
+ struct arm_smccc_res get_wq_res;
+ struct arm_smccc_args get_wq_ctx = {0};
+
+ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+
+ /* Guaranteed to return only success or error, no WAITQ_* */
+ __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
+ ret = get_wq_res.a0;
+ if (ret)
+ return ret;
+
+ *wq_ctx = get_wq_res.a1;
+ *flags = get_wq_res.a2;
+ *more_pending = get_wq_res.a3;
+
+ return 0;
+}
+
+static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq,
+ struct arm_smccc_res *res)
+{
+ int ret;
+ u32 wq_ctx, smc_call_ctx;
+ struct arm_smccc_args resume;
+ struct arm_smccc_args *smc = waitq;
+
+ do {
+ __scm_smc_do_quirk(smc, res);
+
+ if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
+ wq_ctx = res->a1;
+ smc_call_ctx = res->a2;
+
+ ret = qcom_scm_wait_for_wq_completion(wq_ctx);
+ if (ret)
+ return ret;
+
+ fill_wq_resume_args(&resume, smc_call_ctx);
+ smc = &resume;
+ }
+ } while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
+
+ return 0;
+}
+
+static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int ret, retry_count = 0;
if (atomic) {
__scm_smc_do_quirk(smc, res);
- return;
+ return 0;
}
do {
mutex_lock(&qcom_scm_lock);
- __scm_smc_do_quirk(smc, res);
+ ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res);
mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ return ret;
+
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ return 0;
}
@@ -83,7 +151,7 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int i;
+ int i, ret;
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
@@ -135,13 +203,17 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
}
- __scm_smc_do(&smc, &smc_res, atomic);
+ /* ret error check follows after args_virt cleanup */
+ ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
+ if (ret)
+ return ret;
+
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
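
The hunks above implement a sleep/resume handshake: when firmware cannot complete a call it returns QCOM_SCM_WAITQ_SLEEP together with a wait-queue context and an SMC call context; the caller blocks on a completion that qcom_scm_irq_handler() signals after querying GET_WQ_CTX, then re-enters firmware with a WAITQ_RESUME carrying the saved call context. A condensed sketch of that loop using the helpers from the diff, assuming the single wq_ctx (zero) supported today; locking, EBUSY retries and error handling are elided:

static int scm_call_with_waitq(struct qcom_scm *scm,
			       struct arm_smccc_args *smc,
			       struct arm_smccc_res *res)
{
	struct arm_smccc_args resume;

	__scm_smc_do_quirk(smc, res);
	while (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
		u32 smc_call_ctx = res->a2;

		/* qcom_scm_irq_handler() completes this after GET_WQ_CTX */
		wait_for_completion(&scm->waitq_comp);
		fill_wq_resume_args(&resume, smc_call_ctx);
		__scm_smc_do_quirk(&resume, res);	/* re-enter firmware */
	}
	return 0;
}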
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index cdbfe54c8146..468d4d5ab550 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -4,15 +4,18 @@
*/
#include <linux/platform_device.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
@@ -33,6 +36,7 @@ struct qcom_scm {
struct clk *iface_clk;
struct clk *bus_clk;
struct icc_path *path;
+ struct completion waitq_comp;
struct reset_controller_dev reset;
/* control access to the interconnect path */
@@ -63,6 +67,9 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
BIT(2), BIT(1), BIT(4), BIT(6)
};
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
@@ -1325,11 +1332,79 @@ bool qcom_scm_is_available(void)
}
EXPORT_SYMBOL(qcom_scm_is_available);
+static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
+{
+ /* FW currently only supports a single wq_ctx (zero).
+ * TODO: Update this logic to include dynamic allocation and lookup of
+ * completion structs when FW supports more wq_ctx values.
+ */
+ if (wq_ctx != 0) {
+ dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ complete(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
+{
+ int ret;
+ struct qcom_scm *scm = data;
+ u32 wq_ctx, flags, more_pending = 0;
+
+ do {
+ ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
+ if (ret) {
+ dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
+ goto out;
+ }
+
+ if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
+ flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
+ dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
+ goto out;
+ }
+
+ ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
+ if (ret)
+ goto out;
+ } while (more_pending);
+
+out:
+ return IRQ_HANDLED;
+}
+
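
The interrupt handler above is the wake-up half of the protocol: a call parked by firmware with QCOM_SCM_WAITQ_SLEEP sleeps on waitq_comp until GET_WQ_CTX reports its wq_ctx. For illustration only, the resume request that un-parks the call could be issued along these lines; scm_waitq_resume() is a hypothetical name, and the actual patch may build the SMC differently:

static int scm_waitq_resume(unsigned int wq_ctx)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_WAITQ,
		.cmd = QCOM_SCM_WAITQ_RESUME,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = wq_ctx,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	/* Must not sleep: this resumes a call the firmware has parked */
	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}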
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
unsigned long clks;
- int ret;
+ int irq, ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
@@ -1402,6 +1477,19 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm = scm;
__scm->dev = &pdev->dev;
+ init_completion(&__scm->waitq_comp);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ if (irq != -ENXIO)
+ return irq;
+ } else {
+ ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", __scm);
+ if (ret < 0)
+ return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ }
+
__get_convention();
/*
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index db3d08a01209..e6e512bd57d1 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -60,6 +60,9 @@ struct qcom_scm_res {
u64 result[MAX_QCOM_SCM_RETS];
};
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx);
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
+
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
@@ -129,6 +132,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
+#define QCOM_SCM_SVC_WAITQ 0x24
+#define QCOM_SCM_WAITQ_RESUME 0x02
+#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
@@ -137,6 +144,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_WAITQ_SLEEP 2
static inline int qcom_scm_remap_error(int err)
{
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index b4081f4d88a3..bde1f543f529 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1138,13 +1138,17 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
/* allocate service controller and supporting channel */
controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
- if (!controller)
- return -ENOMEM;
+ if (!controller) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
- if (!chans)
- return -ENOMEM;
+ if (!chans) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
controller->dev = dev;
controller->num_chans = SVC_NUM_CHANNEL;
@@ -1159,7 +1163,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to allocate FIFO\n");
- return ret;
+ goto err_destroy_pool;
}
spin_lock_init(&controller->svc_fifo_lock);
@@ -1198,19 +1202,20 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = platform_device_add(svc->stratix10_svc_rsu);
if (ret) {
platform_device_put(svc->stratix10_svc_rsu);
- return ret;
+ goto err_free_kfifo;
}
svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unregister_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- return ret;
+ goto err_unregister_dev;
}
dev_set_drvdata(dev, svc);
@@ -1219,8 +1224,12 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return 0;
+err_unregister_dev:
+ platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
+err_destroy_pool:
+ gen_pool_destroy(genpool);
return ret;
}
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index a353e27f83f5..ce9c007ed66f 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -27,25 +27,56 @@ static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
__init bool sysfb_parse_mode(const struct screen_info *si,
struct simplefb_platform_data *mode)
{
- const struct simplefb_format *f;
__u8 type;
+ u32 bits_per_pixel;
unsigned int i;
type = si->orig_video_isVGA;
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
+ /*
+ * The meaning of depth and bpp for direct-color formats is
+ * inconsistent:
+ *
+ * - DRM format info specifies depth as the number of color
+ * bits, including alpha, but not including filler bits.
+ * - Linux' EFI platform code computes lfb_depth from the
+ * individual color channels, including the reserved bits.
+ * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
+ * versions use 15.
+ * - On the kernel command line, 'bpp' of 32 is usually
+ * XRGB8888 including the filler bits, but 15 is XRGB1555
+ * not including the filler bit.
+ *
+ * It's not easily possible to fix this in struct screen_info,
+ * as this could break UAPI. The best solution is to compute
+ * bits_per_pixel here and ignore lfb_depth. In the loop below,
+ * ignore simplefb formats with alpha bits, as EFI and VESA
+ * don't specify alpha channels.
+ */
+ if (si->lfb_depth > 8) {
+ bits_per_pixel = max(max3(si->red_size + si->red_pos,
+ si->green_size + si->green_pos,
+ si->blue_size + si->blue_pos),
+ si->rsvd_size + si->rsvd_pos);
+ } else {
+ bits_per_pixel = si->lfb_depth;
+ }
+
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- f = &formats[i];
- if (si->lfb_depth == f->bits_per_pixel &&
+ const struct simplefb_format *f = &formats[i];
+
+ if (f->transp.length)
+ continue; /* transparent formats are unsupported by VESA/EFI */
+
+ if (bits_per_pixel == f->bits_per_pixel &&
si->red_size == f->red.length &&
si->red_pos == f->red.offset &&
si->green_size == f->green.length &&
si->green_pos == f->green.offset &&
si->blue_size == f->blue.length &&
- si->blue_pos == f->blue.offset &&
- si->rsvd_size == f->transp.length &&
- si->rsvd_pos == f->transp.offset) {
+ si->blue_pos == f->blue.offset) {
mode->format = f->name;
mode->width = si->lfb_width;
mode->height = si->lfb_height;
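
A worked example of the computation above, for a typical XRGB8888 EFI GOP framebuffer (values shown for illustration):

/*
 * XRGB8888: red 8 bits at offset 16, green 8 at 8, blue 8 at 0,
 * reserved 8 at 24; firmware may report lfb_depth as 24 or 32.
 *
 *   bits_per_pixel = max(max3(8 + 16, 8 + 8, 8 + 0), 8 + 24) = 32
 *
 * so the 32-bit simplefb format matches either way. For XRGB1555
 * (5 at 10, 5 at 5, 5 at 0, reserved 1 at 15) the expression yields
 * 16, giving a stable match whether firmware reports depth 15 or 16.
 */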
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 129f68d7a6f5..acd83d29c866 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -738,8 +738,31 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
*/
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
- type, value, NULL);
+ u32 reg = (type == PM_TAPDELAY_INPUT) ? SD_ITAPDLY : SD_OTAPDLYSEL;
+ u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16);
+
+ if (value) {
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
+ IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+ }
+
+ /*
+ * Work around completely misdesigned firmware API on Xilinx ZynqMP.
+ * The IOCTL_SET_SD_TAPDELAY firmware call only ever allows the caller
+ * to set the IOU_SLCR SD_ITAPDLY Register SD0_ITAPDLYENA/SD1_ITAPDLYENA
+ * bits; there is no matching call to clear those bits. If those
+ * bits are not cleared, SDMMC tuning may fail.
+ *
+ * Luckily, there are PM_MMIO_READ/PM_MMIO_WRITE calls which seem to
+ * allow completely unrestricted access to the entire address space,
+ * including the IOU_SLCR SD_ITAPDLY Register and all the other
+ * registers, access to which was supposed to be protected by the
+ * current firmware API.
+ *
+ * Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing
+ * counterpart of IOCTL_SET_SD_TAPDELAY that clears the
+ * SDx_ITAPDLYENA bits.
+ */
+ return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
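
Caller-side effect of the change, as a brief sketch (the tap-delay value 0x15 is illustrative; NODE_SD_0 and PM_TAPDELAY_INPUT are the existing zynqmp firmware constants used above):

/* Program an SD0 input tap delay via the regular IOCTL path. */
ret = zynqmp_pm_set_sd_tapdelay(NODE_SD_0, PM_TAPDELAY_INPUT, 0x15);

/* A value of 0 now clears SD0_ITAPDLYENA through PM_MMIO_WRITE
 * instead of calling the IOCTL, which cannot undo it.
 */
ret = zynqmp_pm_set_sd_tapdelay(NODE_SD_0, PM_TAPDELAY_INPUT, 0);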